Columns:
  path              string (13 to 17 characters)
  screenshot_names  sequence of strings (1 to 873 items)
  code              string (0 to 40.4k characters)
  cell_type         string (1 distinct value: "code")
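Each row below is one notebook cell. If this preview corresponds to a dataset published on the Hugging Face Hub, rows with this schema can be read with the `datasets` library; a minimal sketch follows, assuming a hypothetical dataset id ("user/kaggle-notebook-cells" is a placeholder, not the real name).

# Minimal sketch for reading rows with this schema; the dataset id is a
# hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset('user/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])              # e.g. '104126055/cell_25'
print(row['screenshot_names'])  # names of the cell's rendered outputs
print(row['cell_type'])         # always 'code' in this dump
print(row['code'][:200])        # first characters of the cell source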
104126055/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB

classifier3 = GaussianNB()
classifier3.fit(X_train, Y_train)
Y_pred3 = classifier3.predict(X_test)
print('Accuracy = ', accuracy_score(Y_test, Y_pred3) * 100, '%')
code
104126055/cell_23
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier

classifier2 = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=1)
classifier2.fit(X_train, Y_train)
Y_pred2 = classifier2.predict(X_test)
print('Accuracy = ', accuracy_score(Y_test, Y_pred2) * 100, '%')
code
104126055/cell_33
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score

classifier_cb = CatBoostClassifier()
classifier_cb.fit(X_train, Y_train)
Y_predcb = classifier_cb.predict(X_test)
print('Accuracy = ', accuracy_score(Y_test, Y_predcb) * 100, '%')
code
104126055/cell_29
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

classifier4 = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier4.fit(X_train, Y_train)
Y_pred4 = classifier4.predict(X_test)

classifier5 = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
classifier5.fit(X_train, Y_train)
Y_pred5 = classifier5.predict(X_test)

# Note: this prints the decision tree's accuracy (Y_pred4), not the
# random forest's (Y_pred5), even though both models are trained here.
print('Accuracy = ', accuracy_score(Y_test, Y_pred4) * 100, '%')
code
104126055/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset['Cholesterol'].unique()
code
104126055/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104126055/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset.info()
code
104126055/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset.describe()
code
104126055/cell_17
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset.replace(to_replace=['drugA', 'drugB', 'drugC', 'drugX', 'DrugY'], value=[0, 1, 2, 3, 4], inplace=True)
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values

ohe = OneHotEncoder()
ct = ColumnTransformer(transformers=[('encoder', ohe, [2])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X[0]
code
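A note on the cell above: a ColumnTransformer with `remainder='passthrough'` one-hot encodes the listed column(s) and appends the untouched columns after them, so the encoded columns come first in the transformed array. A tiny standalone illustration on toy data (not the drug dataset):

# Toy illustration of ColumnTransformer + OneHotEncoder with remainder='passthrough'.
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

toy = np.array([[1, 10, 'a'], [2, 20, 'b'], [3, 30, 'a']], dtype=object)
ct = ColumnTransformer([('encoder', OneHotEncoder(), [2])], remainder='passthrough')
out = ct.fit_transform(toy)
out = out.toarray() if hasattr(out, 'toarray') else out  # may be sparse depending on sklearn defaults
print(out)  # one-hot columns for 'a'/'b' first, then the passthrough columns 0 and 1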
104126055/cell_31
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier

classifier_xg = XGBClassifier()
classifier_xg.fit(X_train, Y_train)
Y_predxg = classifier_xg.predict(X_test)
print('Accuracy = ', accuracy_score(Y_test, Y_predxg) * 100, '%')
code
104126055/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset['BP'].unique()
code
104126055/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

classifier4 = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier4.fit(X_train, Y_train)
Y_pred4 = classifier4.predict(X_test)
print('Accuracy = ', accuracy_score(Y_test, Y_pred4) * 100, '%')
code
104126055/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset['Drug'].unique()
code
104126055/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/drug-classification/drug200.csv')
dataset.head()
code
32073217/cell_6
[ "image_output_2.png", "image_output_1.png" ]
from fipy import Variable, FaceVariable, CellVariable, Grid1D, ExplicitDiffusionTerm, TransientTerm, DiffusionTerm, Viewer
from fipy.tools import numerix
from scipy.special import erf  # added: erf is used below but was only imported in cell_1

nx = 50
Lx = 1.0
dx = Lx / nx
mesh = Grid1D(Lx=Lx, dx=dx)
x = mesh.cellCenters[0]

T = CellVariable(name='solution variable', mesh=mesh, value=0.0)
T.setValue(0)
a = 1.0  # diffusion coefficient
valueLeft = 1
valueRight = 0
T.constrain(valueRight, mesh.facesRight)
T.constrain(valueLeft, mesh.facesLeft)

timeStepDuration = 0.9 * dx ** 2 / (2 * a)  # just under the explicit-scheme stability limit
steps = 100
t_final = timeStepDuration * steps
eqX = TransientTerm() == ExplicitDiffusionTerm(coeff=a)

T_Analytical = CellVariable(name='analytical value', mesh=mesh)
T_Analytical.setValue(1 - erf(x / (2 * numerix.sqrt(a * t_final))))

viewer = Viewer(vars=(T, T_Analytical))
for step in range(steps):
    eqX.solve(var=T, dt=timeStepDuration)
    if __name__ == '__main__':
        viewer.plot('1Dtransient_%0d.png' % step)
code
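On the time step in the cell above: `timeStepDuration = 0.9 * dx ** 2 / (2 * a)` follows the explicit (FTCS) stability bound for 1-D diffusion, dt <= dx^2 / (2a); the factor 0.9 keeps the step safely inside the stable region. A quick standalone check (values copied from the cell, no FiPy needed):

# Stability check for the explicit diffusion step used above.
nx, Lx, a = 50, 1.0, 1.0
dx = Lx / nx                  # 0.02
dt_limit = dx ** 2 / (2 * a)  # FTCS bound: 2.0e-4
dt_used = 0.9 * dt_limit      # 1.8e-4, inside the stable region
assert dt_used < dt_limit
print(dt_used, dt_limit)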
32073217/cell_1
[ "text_plain_output_1.png" ]
!pip install ht
!pip install future
!pip install fipy

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from fipy import Variable, FaceVariable, CellVariable, Grid1D, ExplicitDiffusionTerm, TransientTerm, DiffusionTerm, Viewer
from fipy.tools import numerix
from scipy.special import erf  # doctest: +SCIPY
code
32073217/cell_10
[ "image_output_2.png", "image_output_1.png" ]
from fipy import Variable, FaceVariable, CellVariable, Grid1D, ExplicitDiffusionTerm, TransientTerm, DiffusionTerm, Viewer
from fipy.tools import numerix
from scipy.special import erf  # added: erf is used below but was only imported in cell_1

nx = 50
Lx = 1.0
dx = Lx / nx
mesh = Grid1D(Lx=Lx, dx=dx)
x = mesh.cellCenters[0]

T = CellVariable(name='solution variable', mesh=mesh, value=0.0)
T.setValue(0)
a = 1.0
valueLeft = 1
valueRight = 0
T.constrain(valueRight, mesh.facesRight)
T.constrain(valueLeft, mesh.facesLeft)
timeStepDuration = 0.9 * dx ** 2 / (2 * a)
steps = 100
t_final = timeStepDuration * steps
eqX = TransientTerm() == ExplicitDiffusionTerm(coeff=a)
T_Analytical = CellVariable(name='analytical value', mesh=mesh)
T_Analytical.setValue(1 - erf(x / (2 * numerix.sqrt(a * t_final))))

# Second problem: implicit diffusion with a time-dependent left boundary
# value and a fixed flux on the right.
nx = 50
Lx = 1.0
dx = Lx / nx
mesh = Grid1D(Lx=Lx, dx=dx)
phi2 = CellVariable(name='solution variable', mesh=mesh)
phi2.setValue(0)
x = mesh.cellCenters[0]
time = Variable()
D = 1.0
valueLeft = 1 + 0.5 * (1 + numerix.sin(0.5 * time))
fluxRight = -0.5
phi2.constrain(valueLeft, mesh.facesLeft)
phi2.faceGrad.constrain([fluxRight], mesh.facesRight)
eqI = TransientTerm() == DiffusionTerm(coeff=D)
phiAnalytical = CellVariable(name='analytical value', mesh=mesh)
viewer2 = Viewer(vars=phi2, datamin=0, datamax=3)

dt = 0.5
while time() < 50:
    time.setValue(time() + dt)
    eqI.solve(var=phi2, dt=dt)
    if __name__ == '__main__':
        viewer2.plot()
code
106207999/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
plot_nmr_features(data)
code
106207999/cell_13
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

sb.pairplot(df)
code
106207999/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
print('Categorical Features:', cat_features)
print('Numerical Features:', nmr_features)
code
106207999/cell_33
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_validate  # StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

model_evaluation(x_under, y_under)
code
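In the cell above, `sampling_strategy=0.5` asks RandomUnderSampler for a minority:majority ratio of 1:2 after resampling (the float is the minority count divided by the majority count). A self-contained illustration with synthetic labels:

# Illustration of sampling_strategy=0.5: majority is downsampled until
# minority/majority = 0.5.
import numpy as np
from collections import Counter
from imblearn.under_sampling import RandomUnderSampler

X = np.arange(100).reshape(-1, 1)
y = np.array([0] * 90 + [1] * 10)  # 90 majority, 10 minority
rus = RandomUnderSampler(sampling_strategy=0.5, random_state=0)
X_res, y_res = rus.fit_resample(X, y)
print(Counter(y_res))  # Counter({0: 20, 1: 10})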
106207999/cell_20
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
no_stroke = df['stroke'].value_counts()[0] / len(df['stroke']) * 100
had_stroke = df['stroke'].value_counts()[1] / len(df['stroke']) * 100
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
print(data['stroke'].value_counts())
no_stroke = data['stroke'].value_counts()[0] / len(data['stroke']) * 100
had_stroke = data['stroke'].value_counts()[1] / len(data['stroke']) * 100
print('\nRatio of the people who had no stroke: %.2f%%' % no_stroke)
print('Ratio of the people who had stroke: %.2f%%' % had_stroke)
code
106207999/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df.info()
code
106207999/cell_29
[ "image_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()
code
106207999/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
data.head()
code
106207999/cell_41
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, cross_validate  # KFold/StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

h_parameters = {'max_features': ['sqrt', 'log2'], 'ccp_alpha': [0.1, 0.01, 0.001], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
model = DecisionTreeClassifier()
kfold = KFold(n_splits=5, shuffle=True, random_state=101)
grid = GridSearchCV(estimator=model, param_grid=h_parameters, cv=kfold)
# NOTE: x_train/y_train come from a train/test split made in an earlier cell
# that is not included in this row.
grid.fit(x_train, y_train)
print('Best score:', grid.best_score_)
print('Best parameters: ', grid.best_params_)
code
106207999/cell_2
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106207999/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
data.describe(include='all')
code
106207999/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.info()
code
106207999/cell_45
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, cross_validate  # KFold/StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

h_parameters = {'max_features': ['sqrt', 'log2'], 'ccp_alpha': [0.1, 0.01, 0.001], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
model = DecisionTreeClassifier()
kfold = KFold(n_splits=5, shuffle=True, random_state=101)
grid = GridSearchCV(estimator=model, param_grid=h_parameters, cv=kfold)
# NOTE: x_train/x_test/y_train/y_test come from a train/test split made in an
# earlier cell that is not included in this row.
grid.fit(x_train, y_train)

model = DecisionTreeClassifier(max_features='log2', max_depth=8, criterion='gini', ccp_alpha=0.001)
model.fit(x_train, y_train)
y_hat = model.predict(x_test)
print('Accuracy score: %.1f%%' % (accuracy_score(y_test, y_hat) * 100))
print('Precision score: %.3f' % precision_score(y_test, y_hat))
print('Recall: %.3f' % recall_score(y_test, y_hat))
print('F1-Score: %.3f' % f1_score(y_test, y_hat))
code
106207999/cell_28
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
print('x under sampled shape: ', x_under.shape)
print('y under sampled shape: ', y_under.shape)
code
106207999/cell_8
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.describe(include='all')
code
106207999/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df.head()
code
106207999/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
code
106207999/cell_35
[ "text_html_output_1.png" ]
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_validate  # StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

oversampling = SMOTE(sampling_strategy=0.5)
x_over, y_over = oversampling.fit_resample(x, y)
print('x over sampled shape: ', x_over.shape)
print('y over sampled shape: ', y_over.shape)
print(pd.DataFrame(y_over).value_counts())
code
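As with the undersampler, `sampling_strategy=0.5` here sets the post-resampling minority:majority ratio, but SMOTE reaches it by synthesising new minority samples (interpolated between nearest neighbours) instead of discarding majority ones. A self-contained illustration:

# Illustration of SMOTE with sampling_strategy=0.5: minority is oversampled
# until minority/majority = 0.5; the majority class is left untouched.
import numpy as np
from collections import Counter
from imblearn.over_sampling import SMOTE

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
y = np.array([0] * 90 + [1] * 10)
sm = SMOTE(sampling_strategy=0.5, random_state=0)
X_res, y_res = sm.fit_resample(X, y)
print(Counter(y_res))  # Counter({0: 90, 1: 45})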
106207999/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, cross_validate  # KFold/StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

h_parameters = {'max_features': ['sqrt', 'log2'], 'ccp_alpha': [0.1, 0.01, 0.001], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
model = DecisionTreeClassifier()
kfold = KFold(n_splits=5, shuffle=True, random_state=101)
grid = GridSearchCV(estimator=model, param_grid=h_parameters, cv=kfold)
# NOTE: x_train/y_train come from a train/test split made in an earlier cell
# that is not included in this row.
grid.fit(x_train, y_train)

model = DecisionTreeClassifier(max_features='log2', max_depth=8, criterion='gini', ccp_alpha=0.001)
model.fit(x_train, y_train)
code
106207999/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
code
106207999/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')
print(df['stroke'].value_counts())
no_stroke = df['stroke'].value_counts()[0] / len(df['stroke']) * 100
had_stroke = df['stroke'].value_counts()[1] / len(df['stroke']) * 100
print('\nRatio of the people who had no stroke: %.2f%%' % no_stroke)
print('Ratio of the people who had stroke: %.2f%%' % had_stroke)
code
106207999/cell_22
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
plot_cat_features(data)
code
106207999/cell_10
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

plot_cat_features(df)
code
106207999/cell_12
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

plot_nmr_features(df)
code
106207999/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df.describe(include='all')
code
106207999/cell_36
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_validate  # StratifiedKFold added: used below but not imported in the original cell
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

sb.set(style='whitegrid')
df = pd.read_csv('../input/full-filled-brain-stroke-dataset/full_data.csv')
df[['hypertension', 'heart_disease', 'stroke']] = df[['hypertension', 'heart_disease', 'stroke']].astype('object')

def plot_cat_features(df):
    fig, ax = plt.subplots(2, 3, figsize=(18, 12))
    sb.countplot(x='stroke', data=df, hue='gender', ax=ax[0, 0])
    sb.countplot(x='stroke', data=df, hue='hypertension', ax=ax[0, 1])
    sb.countplot(x='stroke', data=df, hue='heart_disease', ax=ax[0, 2])
    sb.countplot(x='stroke', data=df, hue='ever_married', ax=ax[1, 0])
    sb.countplot(x='stroke', data=df, hue='work_type', ax=ax[1, 1])
    sb.countplot(x='stroke', data=df, hue='smoking_status', ax=ax[1, 2])
    fig.suptitle('Bar graph showing the number of people who had or not Stroke', y=0.93, fontsize=22)
    plt.show()

def plot_nmr_features(df):
    fig, ax = plt.subplots(3, 2, figsize=(12, 18))
    sb.distplot(df['age'], label='Skewness: %.2f' % df['age'].skew(), ax=ax[0, 0])
    ax[0, 0].legend(loc='best')
    sb.boxplot(df['age'], ax=ax[0, 1])
    sb.distplot(df['avg_glucose_level'], label='Skewness: %.2f' % df['avg_glucose_level'].skew(), ax=ax[1, 0])
    ax[1, 0].legend(loc='best')
    sb.boxplot(df['avg_glucose_level'], ax=ax[1, 1])
    sb.distplot(df['bmi'], label='Skewness: %.2f' % df['bmi'].skew(), ax=ax[2, 0])
    ax[2, 0].legend(loc='best')
    sb.boxplot(df['bmi'], ax=ax[2, 1])
    fig.suptitle('Distribution of numerical features', y=0.93, fontsize=22)
    plt.show()

df.isnull().sum()
data = df[(df['avg_glucose_level'] <= 160) & (df['bmi'] <= 45)]
data = data.reset_index(drop=True)
features = data.drop('stroke', axis=1).columns
features
cat_features = []
nmr_features = []
for i in range(len(features)):
    if df.iloc[:, i].dtype == 'object':
        cat_features.append(df.columns[i])
    else:
        nmr_features.append(df.columns[i])
data = pd.get_dummies(data, columns=cat_features, drop_first=True)
x = data.drop('stroke', axis=1)
y = data['stroke'].astype('int')
undersample = RandomUnderSampler(sampling_strategy=0.5)
x_under, y_under = undersample.fit_resample(x, y)
pd.DataFrame(y_under).value_counts()

def model_evaluation(x, y):
    models = []
    names = []
    scoring = ['accuracy', 'precision', 'recall', 'f1']
    models.append(('SVC', SVC()))
    models.append(('DTC', DecisionTreeClassifier()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('GNB', GaussianNB()))
    df_results = pd.DataFrame(columns=['Algorithm', 'Acc Mean', 'Acc STD', 'Pre Mean', 'Pre STD', 'Rec Mean', 'Rec STD', 'F1 Mean', 'F1 STD'])
    results_acc = []
    results_pre = []
    results_rec = []
    results_f1 = []
    for name, model in models:
        names.append(name)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=101)
        result = cross_validate(model, x, y, cv=kfold, scoring=scoring)
        # Accuracy
        acc_mean = result['test_accuracy'].mean()
        acc_std = result['test_accuracy'].std()
        # Precision
        pre_mean = result['test_precision'].mean()
        pre_std = result['test_precision'].std()
        # Recall
        rec_mean = result['test_recall'].mean()
        rec_std = result['test_recall'].std()
        # F1-Score
        f1_mean = result['test_f1'].mean()
        f1_std = result['test_f1'].std()
        df_result_row = {'Algorithm': name, 'Acc Mean': acc_mean, 'Acc STD': acc_std, 'Pre Mean': pre_mean, 'Pre STD': pre_std, 'Rec Mean': rec_mean, 'Rec STD': rec_std, 'F1 Mean': f1_mean, 'F1 STD': f1_std}
        df_results = df_results.append(df_result_row, ignore_index=True)
        results_acc.append(result['test_accuracy'])
        results_pre.append(result['test_precision'])
        results_rec.append(result['test_recall'])
        results_f1.append(result['test_f1'])
    df_results = df_results.set_index('Algorithm')
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # Display the mean and standard deviation of all metrics for all algorithms
    print(df_results)
    # Display the overall results in a boxplot graph
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(14, 6))
    ax1.boxplot(results_acc)
    ax1.set_title('Accuracy', fontsize=14)
    ax1.set_xticklabels(names)
    ax2.boxplot(results_pre)
    ax2.set_title('Precision', fontsize=14)
    ax2.set_xticklabels(names)
    ax3.boxplot(results_rec)
    ax3.set_title('Recall', fontsize=14)
    ax3.set_xticklabels(names)
    ax4.boxplot(results_f1)
    ax4.set_title('F1-Score', fontsize=14)
    ax4.set_xticklabels(names)
    plt.show()

oversampling = SMOTE(sampling_strategy=0.5)
x_over, y_over = oversampling.fit_resample(x, y)
model_evaluation(x_over, y_over)
code
34133512/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import torch

if torch.cuda.is_available():
    device = torch.device('cuda')
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device('cpu')
code
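The `device` object selected above is what the rest of a PyTorch notebook typically passes to `.to(...)` to place tensors and models on the chosen hardware; a minimal usage sketch:

# Minimal usage of a torch device selected as in the cell above.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
t = torch.zeros(2, 3).to(device)  # move a tensor to the chosen device
print(t.device)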
34133512/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multiclass import OneVsRestClassifier

try:
    df_train = pd.read_csv('data/train.csv')
    df_test = pd.read_csv('data/test.csv')
except:
    df_train = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/train.csv')
    df_test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
df_train.fillna('', inplace=True)
df_test.fillna('', inplace=True)

tf = TfidfVectorizer(ngram_range=(1, 1))
# 'target' is created in an earlier cell that is not part of this row
# (presumably an encoded version of the 'sentiment' column).
X_train, X_val, y_train, y_val = train_test_split(tf.fit_transform(df_train['text']), df_train['target'])
df_train['is_training'] = [1 if x in y_train.index else 0 for x in df_train.index]

parameters = {}
clf = OneVsRestClassifier(LogisticRegression(solver='lbfgs'))
clf.fit(X_train, y_train)
y_val_predict_sentiment = clf.predict(X_val)
f1_score(y_val, y_val_predict_sentiment, average='weighted')
code
34133512/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import random
import re
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, GridSearchCV
from transformers import BertTokenizer
from tqdm.notebook import tqdm_notebook

tqdm_notebook.pandas()
code
34133512/cell_15
[ "text_plain_output_1.png" ]
import random
import re
import pandas as pd
import spacy
from spacy import displacy
from spacy.util import compounding, minibatch
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV

try:
    df_train = pd.read_csv('data/train.csv')
    df_test = pd.read_csv('data/test.csv')
except:
    df_train = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/train.csv')
    df_test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
df_train.fillna('', inplace=True)
df_test.fillna('', inplace=True)

tf = TfidfVectorizer(ngram_range=(1, 1))
# 'target' is created in an earlier cell that is not part of this row.
X_train, X_val, y_train, y_val = train_test_split(tf.fit_transform(df_train['text']), df_train['target'])
df_train['is_training'] = [1 if x in y_train.index else 0 for x in df_train.index]

# Build NER training data: each selected_text span is labelled with the tweet's sentiment.
train_data = []
for idx, row in df_train[df_train['sentiment'] != 'neutral'].iterrows():
    text = row['text']
    selected_text = row['selected_text']
    if selected_text in text:
        entities = []
        try:
            for match in re.finditer(re.escape(selected_text), text):
                start_char = match.start()
                end_char = match.end()
                entity_label = row['sentiment']
                entities.append((start_char, end_char, entity_label))
        except Exception as e:
            raise e
        train_data.append((text, {'entities': entities}))

def spacy_train_custom(train_data, epochs=10):
    # Trains a blank NER model using the spaCy 2.x API
    # (create_pipe / nlp.update(texts, annotations)); spaCy 3 changed this interface.
    sample_print = 2
    nlp = spacy.blank('en')
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)
    else:
        ner = nlp.get_pipe('ner')
    for _, annotations in train_data:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    pipe_exceptions = ['ner', 'trf_wordpiecer', 'trf_tok2vec']
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):
        nlp.begin_training()
        n_iterations = epochs
        for itn in range(n_iterations):
            random.shuffle(train_data)
            losses = {}
            batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
            for batch_no, batch in enumerate(batches):
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, drop=0.3, losses=losses)
    for text, _ in train_data[:sample_print]:
        doc = nlp(text)
    return nlp

nlp = spacy_train_custom(train_data[0:1000], epochs=20)

sample = df_train.sample().iloc[0]
doc = nlp(sample.text)
displacy.render(doc, style='ent')
print(sample.sentiment)
print(sample.selected_text)
code
34133512/cell_17
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV
from spacy.util import compounding, minibatch
import pandas as pd
import random
import re
import spacy
try:
    df_train = pd.read_csv('data/train.csv')
    df_test = pd.read_csv('data/test.csv')
except FileNotFoundError:
    df_train = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/train.csv')
    df_test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
df_train.fillna('', inplace=True)
df_test.fillna('', inplace=True)
tf = TfidfVectorizer(ngram_range=(1, 1))
X_train, X_val, y_train, y_val = train_test_split(tf.fit_transform(df_train['text']), df_train['target'])
df_train['is_training'] = [1 if x in y_train.index else 0 for x in df_train.index]
import spacy
from spacy.util import compounding, minibatch
train_data = []
for idx, row in df_train[df_train['sentiment'] != 'neutral'].iterrows():
    text = row['text']
    selected_text = row['selected_text']
    if selected_text in text:
        entities = []
        for match in re.finditer(re.escape(selected_text), text):
            start_char = match.start()
            end_char = match.end()
            entity_label = row['sentiment']
            entities.append((start_char, end_char, entity_label))
        train_data.append((text, {'entities': entities}))

def spacy_train_custom(train_data, epochs=10):
    sample_print = 2
    nlp = spacy.blank('en')
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)
    else:
        ner = nlp.get_pipe('ner')
    for _, annotations in train_data:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    pipe_exceptions = ['ner', 'trf_wordpiecer', 'trf_tok2vec']
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):
        nlp.begin_training()
        n_iterations = epochs
        for itn in range(n_iterations):
            random.shuffle(train_data)
            losses = {}
            batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
            for batch_no, batch in enumerate(batches):
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, drop=0.3, losses=losses)
    for text, _ in train_data[:sample_print]:
        doc = nlp(text)
    return nlp

nlp = spacy_train_custom(train_data[0:1000], epochs=20)
# Predict spans on the test set; neutral tweets (and empty predictions) fall
# back to the full tweet text.
df_test['selected_text'] = df_test['text'].progress_apply(lambda x: ' '.join([l.text for l in nlp(x).ents]))
df_test['selected_text'] = [row['text'] if row['sentiment'] == 'neutral' or row['selected_text'] == '' else row['selected_text'] for idx, row in df_test.iterrows()]
code
34133512/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV
from spacy.util import compounding, minibatch
import pandas as pd
import random
import re
import spacy
try:
    df_train = pd.read_csv('data/train.csv')
    df_test = pd.read_csv('data/test.csv')
except FileNotFoundError:
    df_train = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/train.csv')
    df_test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
df_train.fillna('', inplace=True)
df_test.fillna('', inplace=True)
tf = TfidfVectorizer(ngram_range=(1, 1))
X_train, X_val, y_train, y_val = train_test_split(tf.fit_transform(df_train['text']), df_train['target'])
df_train['is_training'] = [1 if x in y_train.index else 0 for x in df_train.index]
import spacy
from spacy.util import compounding, minibatch
train_data = []
for idx, row in df_train[df_train['sentiment'] != 'neutral'].iterrows():
    text = row['text']
    selected_text = row['selected_text']
    if selected_text in text:
        entities = []
        for match in re.finditer(re.escape(selected_text), text):
            start_char = match.start()
            end_char = match.end()
            entity_label = row['sentiment']
            entities.append((start_char, end_char, entity_label))
        train_data.append((text, {'entities': entities}))

def spacy_train_custom(train_data, epochs=10):
    sample_print = 2
    nlp = spacy.blank('en')
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)
    else:
        ner = nlp.get_pipe('ner')
    for _, annotations in train_data:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    pipe_exceptions = ['ner', 'trf_wordpiecer', 'trf_tok2vec']
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):
        nlp.begin_training()
        n_iterations = epochs
        for itn in range(n_iterations):
            random.shuffle(train_data)
            losses = {}
            batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
            for batch_no, batch in enumerate(batches):
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, drop=0.3, losses=losses)
    for text, _ in train_data[:sample_print]:
        doc = nlp(text)
    return nlp

nlp = spacy_train_custom(train_data[0:1000], epochs=20)
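# --- Editor's addition (hedged sketch; not in the source notebook) ---
# The loop above uses the spaCy 2.x training API (create_pipe / begin_training /
# nlp.update(texts, annotations)). Under spaCy 3.x the same step is written with
# Example objects; a rough port, assuming the same train_data format:
import spacy
from spacy.training import Example

nlp_v3 = spacy.blank('en')
ner_v3 = nlp_v3.add_pipe('ner')
for _, annotations_v3 in train_data[:1000]:
    for ent in annotations_v3.get('entities'):
        ner_v3.add_label(ent[2])
optimizer = nlp_v3.initialize()
for text_v3, annotations_v3 in train_data[:100]:
    example = Example.from_dict(nlp_v3.make_doc(text_v3), annotations_v3)
    nlp_v3.update([example], drop=0.3, sgd=optimizer)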
code
73069445/cell_2
[ "text_plain_output_1.png" ]
import wandb
import wandb
print(wandb.__version__)
code
73069445/cell_1
[ "text_plain_output_1.png" ]
!pip install wandb --upgrade
code
73069445/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from kaggle_secrets import UserSecretsClient
import wandb
import wandb
try:
    from kaggle_secrets import UserSecretsClient
    user_secrets = UserSecretsClient()
    secret_value_0 = user_secrets.get_secret('wandb_api')
    wandb.login(key=secret_value_0)
except:
    print('Go to Add-ons -> Secrets and provide your W&B access token. Use the Label name as wandb_api. \nGet your W&B access token from here: https://wandb.ai/authorize')
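# --- Editor's addition (hedged sketch; not in the source notebook) ---
# Once login succeeds, a run is typically opened and closed like this
# (the project name is illustrative, not from the source):
try:
    run = wandb.init(project='kaggle-demo', anonymous='allow')
    run.log({'example_metric': 1.0})
    run.finish()
except Exception as err:
    print('Could not start a W&B run:', err)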
code
122251563/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    # Map each Olympic host city to its host country.
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)
France_team = df[df['Team'] == 'France']
France_team_grouped = France_team.groupby('Year')['Medal'].sum()
X = France_team_grouped.index.values.reshape(-1, 1)
y = France_team_grouped.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print('Model performance according to the MSE:', mse)
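# --- Editor's addition (hedged sketch; not in the source notebook) ---
# 'Medal' holds strings ('Gold'/'Silver'/'Bronze') and NaN, so .sum() above is
# fragile (it concatenates strings rather than counting medals); a count-based
# yearly target was probably intended and is more robust:
medals_per_year = France_team.groupby('Year')['Medal'].count()  # non-null medals per Games
print(medals_per_year.head())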
code
122251563/cell_18
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)
France_team = df[df['Team'] == 'France']
France_team_grouped = France_team.groupby('Year')['Medal'].sum()
X = France_team_grouped.index.values.reshape(-1, 1)
y = France_team_grouped.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr = LinearRegression()
lr.fit(X_train, y_train)
code
122251563/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)

def plot_medals_by_country(country):
    country_team = df[df['Team'] == country]
    country_host = df[(df['Team'] == country) & (df['Pays_Hebergeur'] == country)]
    country_team_grouped = country_team.groupby('Year')['Medal'].sum()
    country_host_sum = country_host.groupby('Year')['Medal'].sum()
    # NOTE: the matplotlib drawing calls that produced this cell's image output
    # are not preserved in this extract; only the data preparation survives.

plot_medals_by_country('China')
code
122251563/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)

def plot_medals_by_country(country):
    country_team = df[df['Team'] == country]
    country_host = df[(df['Team'] == country) & (df['Pays_Hebergeur'] == country)]
    country_team_grouped = country_team.groupby('Year')['Medal'].sum()
    country_host_sum = country_host.groupby('Year')['Medal'].sum()
    # NOTE: the matplotlib drawing calls that produced this cell's image output
    # are not preserved in this extract; only the data preparation survives.

plot_medals_by_country('Spain')
code
122251563/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)
France_team = df[df['Team'] == 'France']
France_team_grouped = France_team.groupby('Year')['Medal'].sum()
X = France_team_grouped.index.values.reshape(-1, 1)
y = France_team_grouped.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
france_2024 = df1[(df1['Team'] == 'France') & (df1['Year'] == 2024)]  # empty: the dataset stops at 2016
X_2024 = np.array([[2024]])
y_pred_2024 = lr.predict(X_2024)
print('Predicted number of medals:', y_pred_2024[0])
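# --- Editor's addition (hedged sketch; not in the source notebook) ---
# The same 2024 figure can be reproduced from the fitted coefficients, which
# makes the linear extrapolation explicit: y = slope * year + intercept.
print('slope:', lr.coef_[0], 'intercept:', lr.intercept_)
print('manual 2024 prediction:', lr.coef_[0] * 2024 + lr.intercept_)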
code
122251563/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df1 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
df2 = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
df = pd.merge(df1, df2, left_on='NOC', right_on='NOC')
df = df.query('Season == "Summer"')
df.replace('USA', 'United States of America', inplace=True)
df.replace('Tanzania', 'United Republic of Tanzania', inplace=True)
df.replace('Democratic Republic of Congo', 'Democratic Republic of the Congo', inplace=True)
df.replace('Congo', 'Republic of the Congo', inplace=True)
df.replace('Lao', 'Laos', inplace=True)
df.replace('Syrian Arab Republic', 'Syria', inplace=True)
df.replace('Serbia', 'Republic of Serbia', inplace=True)
df.replace('Czechia', 'Czech Republic', inplace=True)
df.replace('UAE', 'United Arab Emirates', inplace=True)
df.replace('UK', 'United Kingdom', inplace=True)

def Pays_Hebergeur(col):
    if col == 'Rio de Janeiro':
        return 'Brazil'
    elif col == 'London':
        return 'United Kingdom'
    elif col == 'Beijing':
        return 'China'
    elif col == 'Athina':
        return 'Greece'
    elif col == 'Sydney' or col == 'Melbourne':
        return 'Australia'
    elif col == 'Atlanta' or col == 'Los Angeles' or col == 'St. Louis':
        return 'United States of America'
    elif col == 'Barcelona':
        return 'Spain'
    elif col == 'Seoul':
        return 'South Korea'
    elif col == 'Moskva':
        return 'Russia'
    elif col == 'Montreal':
        return 'Canada'
    elif col == 'Munich' or col == 'Berlin':
        return 'Germany'
    elif col == 'Mexico City':
        return 'Mexico'
    elif col == 'Tokyo':
        return 'Japan'
    elif col == 'Roma':
        return 'Italy'
    elif col == 'Paris':
        return 'France'
    elif col == 'Helsinki':
        return 'Finland'
    elif col == 'Amsterdam':
        return 'Netherlands'
    elif col == 'Antwerpen':
        return 'Belgium'
    elif col == 'Stockholm':
        return 'Sweden'
    else:
        return 'Other'

df['Pays_Hebergeur'] = df['City'].apply(Pays_Hebergeur)

def plot_medals_by_country(country):
    country_team = df[df['Team'] == country]
    country_host = df[(df['Team'] == country) & (df['Pays_Hebergeur'] == country)]
    country_team_grouped = country_team.groupby('Year')['Medal'].sum()
    country_host_sum = country_host.groupby('Year')['Medal'].sum()
    # NOTE: the matplotlib drawing calls that produced this cell's image output
    # are not preserved in this extract; only the data preparation survives.

plot_medals_by_country('France')
code
18120119/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
code
18120119/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
with open(example2_path, 'w') as file2:
    file2.write('This is line A')
with open(example2_path, 'w') as file2:
    file2.write('This is line A\n')
    file2.write('This is line B\n')
    file2.write('This is line C\n')
code
18120119/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
code
18120119/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
!wget -O ../data/Example1.txt https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
code
18120119/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
df.head()
code
18120119/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
with open(example2_path, 'w') as file2:
    file2.write('This is line A')
with open(example2_path, 'w') as file2:
    file2.write('This is line A\n')
    file2.write('This is line B\n')
    file2.write('This is line C\n')
lines = ['This is line D\n', 'This is line E\n', 'This is line F\n']
lines
with open(example2_path, 'a') as file2:
    for line in lines:
        file2.write(line)
code
18120119/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
code
18120119/cell_44
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
code
18120119/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
with open(example2_path, 'w') as file2:
    file2.write('This is line A')
with open(example2_path, 'r') as file2:
    print(file2.read())
code
18120119/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
print(f'file1 object = {file1}')
print(f'Type of file1 object = {type(file1)}')
code
18120119/cell_40
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.head()
code
18120119/cell_39
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
code
18120119/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
example3_path = '../data/Example3.txt'
with open(example2_path, 'r') as readfile:
    with open(example3_path, 'w') as writefile:
        for line in readfile:
            writefile.write(line)
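# --- Editor's addition (hedged sketch; not in the source notebook) ---
# The nested with-blocks above copy the file line by line; the standard
# library does the same copy in a single call:
import shutil
shutil.copyfile(example2_path, example3_path)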
code
18120119/cell_48
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
len(df['Released'])
code
18120119/cell_41
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
code
18120119/cell_54
[ "application_vnd.jupyter.stderr_output_1.png" ]
with open('../data/new_songs.csv', 'r') as songsfile:
    print(songsfile.read())
code
18120119/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
with open(example2_path, 'w') as file2:
    file2.write('This is line A')
code
18120119/cell_50
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
len(df['Released'].unique())
code
18120119/cell_52
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
new_songs = df[df['Released'] >= 1980]
new_songs
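# --- Editor's addition (hedged sketch; not in the source notebook) ---
# The same boolean-mask filter expressed with DataFrame.query:
new_songs_via_query = df.query('Released >= 1980')
print(new_songs_via_query.equals(new_songs))  # expected: True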
code
18120119/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
code
18120119/cell_45
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
code
18120119/cell_49
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
df['Released'].unique()
code
18120119/cell_51
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
df['Released'] >= 1980
code
18120119/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
example3_path = '../data/Example3.txt'
# `testfile` is never opened in this extract (the open() presumably happened in
# cell_27); without the assumed line below, `testfile.name` raises NameError.
testfile = open(example3_path, 'r')
testfile.name
code
18120119/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
code
18120119/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
file1.closed
with open(example1_path, 'r') as file1:
    file_contents = file1.read()
with open(example1_path, 'r') as file1:
    file_contents = file1.readlines()
with open(example1_path, 'r') as file1:
    file_contents = file1.readline()
    print(f'file_contents \n{file_contents}')
print(file1.closed)
print(f'file_contents \n{file_contents}')
code
18120119/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
file1.closed
with open(example1_path, 'r') as file1:
    file_contents = file1.read()
with open(example1_path, 'r') as file1:
    file_contents = file1.readlines()
with open(example1_path, 'r') as file1:
    file_contents = file1.readline()
with open(example1_path, 'r') as file1:
    for i, line in enumerate(file1):
        print(f'Line {i + 1} contains {line}')
code
18120119/cell_47
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
df['Released']
code
18120119/cell_3
[ "text_plain_output_1.png" ]
# Check current working directory
!pwd
code
18120119/cell_43
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
code
18120119/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
example2_path = '../data/example2.txt'
with open(example2_path, 'w') as file2:
    file2.write('This is line A')
with open(example2_path, 'w') as file2:
    file2.write('This is line A\n')
    file2.write('This is line B\n')
    file2.write('This is line C\n')
lines = ['This is line D\n', 'This is line E\n', 'This is line F\n']
lines
with open(example2_path, 'a') as file2:
    for line in lines:
        file2.write(line)
with open(example2_path, 'r') as file2:
    print(file2.read())
code
18120119/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
file1.closed
with open(example1_path, 'r') as file1:
    file_contents = file1.read()
with open(example1_path, 'r') as file1:
    file_contents = file1.readlines()
    print(f'file_contents \n{file_contents}')
print(file1.closed)
print(f'file_contents \n{file_contents}')
code
18120119/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
lines = ['This is line D\n', 'This is line E\n', 'This is line F\n']
lines
code
18120119/cell_53
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
csv_url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv'
df = pd.read_csv(csv_url)
csv_path = '../data/TopSellingAlbums.csv'
df = pd.read_csv(csv_path)
df.iloc[0, 0]
df.loc[0, 'Artist']
df.loc[1, 'Artist']
df.iloc[0:2, 0:3]
df.loc[0:2, 'Artist':'Released']
new_songs = df[df['Released'] >= 1980]
new_songs
new_songs.to_csv('../data/new_songs.csv')
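# --- Editor's addition (hedged sketch; not in the source notebook) ---
# to_csv above also writes the integer index as an unnamed first column
# (visible when cell_54 reads the file back); index=False avoids that.
# The filename here is illustrative:
new_songs.to_csv('../data/new_songs_noindex.csv', index=False)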
code
18120119/cell_10
[ "text_plain_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
file1.closed
code
18120119/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
example3_path = '../data/Example3.txt'
with open(example3_path, 'r') as testfile:
    print(testfile.read())
code
18120119/cell_37
[ "application_vnd.jupyter.stderr_output_1.png" ]
!wget -O ./data/TopSellingAlbums.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Datasets/TopSellingAlbums.csv
code
18120119/cell_12
[ "text_plain_output_1.png" ]
example1_path = '../data/Example1.txt'
file1 = open(example1_path, 'r')
file1.name
file1.mode
file1.close()
file1.closed
with open(example1_path, 'r') as file1:
    file_contents = file1.read()
    print(f'file_contents \n{file_contents}')
print(file1.closed)
print(f'file_contents \n{file_contents}')
code
128043237/cell_9
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from PIL import Image
from torch.utils.data import random_split, DataLoader, Dataset
from torchvision.io import read_image, ImageReadMode
import matplotlib.pyplot as plt
import os
import torchvision.transforms.functional as TF

BATCH_SIZE = 16
IMAGE_SIZE = (256, 256)
IN_CHANNELS = 3
LEARNING_RATE = 0.0001
IMAGES_DIR = '/kaggle/input/danish-golf-courses-orthophotos/1. orthophotos/'
SEGMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/2. segmentation masks/'
LABELMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/3. class masks/'

# Loading the data
orthophoto_list = os.listdir(IMAGES_DIR)
print("There are ", len(orthophoto_list), " orthophotos in this dataset!")

# Load image with index of 5 (I prefer this image as it shows all the classes)
idx = 5  # The index can be changed to view other orthophotos.
golf_image = Image.open(os.path.join(IMAGES_DIR, orthophoto_list[idx]))
golf_segmask = Image.open(os.path.join(SEGMASKS_DIR, orthophoto_list[idx].replace(".jpg", ".png")))  # The class masks are png instead of jpg

# Plot using matplotlib
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Orthophoto')
axes[1].set_title('Segmentation Mask')
axes[0].imshow(golf_image)
axes[1].imshow(golf_segmask)

class GolfDataset(Dataset):

    def __init__(self, images_dir, labelmasks_dir):
        self.images_dir = images_dir
        self.labelmasks_dir = labelmasks_dir
        self.images_dir_list = os.listdir(images_dir)

    def __len__(self):
        return len(self.images_dir_list)

    def __getitem__(self, idx):
        image_path = os.path.join(self.images_dir, self.images_dir_list[idx])
        image = read_image(image_path, ImageReadMode.RGB)
        label_mask_path = os.path.join(self.labelmasks_dir, self.images_dir_list[idx]).replace('.jpg', '.png')
        label_mask = read_image(label_mask_path, ImageReadMode.GRAY)
        image = TF.resize(image, IMAGE_SIZE)
        image = image.float()
        image = image / 255
        label_mask = TF.resize(label_mask, IMAGE_SIZE)
        label_mask = TF.rgb_to_grayscale(label_mask)  # effectively a no-op: the mask was already read as single-channel
        label_mask = label_mask.float()
        return (image, label_mask)

golf_ds = GolfDataset(IMAGES_DIR, LABELMASKS_DIR)
idx = 5
orthophoto = golf_ds.__getitem__(idx)[0]
label_mask = golf_ds.__getitem__(idx)[1]
print('Orthophoto: ', orthophoto.shape, orthophoto)
print('Label:', label_mask.shape, label_mask)
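# --- Editor's addition (hedged sketch; not in the source notebook) ---
# Quick shape sanity-check of the Dataset through a DataLoader:
loader = DataLoader(golf_ds, batch_size=BATCH_SIZE)
images, masks = next(iter(loader))
print(images.shape, masks.shape)  # expected: torch.Size([16, 3, 256, 256]) and torch.Size([16, 1, 256, 256])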
code
128043237/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image
from torch import nn
from torch.utils.data import random_split, DataLoader, Dataset
from torchvision.io import read_image, ImageReadMode
import matplotlib.pyplot as plt
import os
import pytorch_lightning as pl
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF

BATCH_SIZE = 16
IMAGE_SIZE = (256, 256)
IN_CHANNELS = 3
LEARNING_RATE = 0.0001
IMAGES_DIR = '/kaggle/input/danish-golf-courses-orthophotos/1. orthophotos/'
SEGMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/2. segmentation masks/'
LABELMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/3. class masks/'

# Loading the data
orthophoto_list = os.listdir(IMAGES_DIR)
print("There are ", len(orthophoto_list), " orthophotos in this dataset!")

# Load image with index of 5 (I prefer this image as it shows all the classes)
idx = 5  # The index can be changed to view other orthophotos.
golf_image = Image.open(os.path.join(IMAGES_DIR, orthophoto_list[idx]))
golf_segmask = Image.open(os.path.join(SEGMASKS_DIR, orthophoto_list[idx].replace(".jpg", ".png")))  # The class masks are png instead of jpg

# Plot using matplotlib
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Orthophoto')
axes[1].set_title('Segmentation Mask')
axes[0].imshow(golf_image)
axes[1].imshow(golf_segmask)

class GolfDataset(Dataset):

    def __init__(self, images_dir, labelmasks_dir):
        self.images_dir = images_dir
        self.labelmasks_dir = labelmasks_dir
        self.images_dir_list = os.listdir(images_dir)

    def __len__(self):
        return len(self.images_dir_list)

    def __getitem__(self, idx):
        image_path = os.path.join(self.images_dir, self.images_dir_list[idx])
        image = read_image(image_path, ImageReadMode.RGB)
        label_mask_path = os.path.join(self.labelmasks_dir, self.images_dir_list[idx]).replace('.jpg', '.png')
        label_mask = read_image(label_mask_path, ImageReadMode.GRAY)
        image = TF.resize(image, IMAGE_SIZE)
        image = image.float()
        image = image / 255
        label_mask = TF.resize(label_mask, IMAGE_SIZE)
        label_mask = TF.rgb_to_grayscale(label_mask)
        label_mask = label_mask.float()
        return (image, label_mask)

class GolfDataModule(pl.LightningDataModule):

    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
        self.all_images = []

    def prepare_data(self):
        pass

    def setup(self, stage=None):
        self.all_images = GolfDataset(IMAGES_DIR, LABELMASKS_DIR)
        self.train_data, self.val_data, self.test_data = random_split(self.all_images, [0.7, 0.2, 0.1])

    def train_dataloader(self):
        return DataLoader(self.train_data, batch_size=self.batch_size, num_workers=2, pin_memory=True, persistent_workers=True)

    def val_dataloader(self):
        return DataLoader(self.val_data, batch_size=self.batch_size, num_workers=2, pin_memory=True, persistent_workers=True)

    def test_dataloader(self):
        return DataLoader(self.test_data, batch_size=self.batch_size, num_workers=2, pin_memory=True, persistent_workers=True)

class UNetModel(pl.LightningModule):

    def __init__(self):
        super().__init__()

        class DoubleConvSame(nn.Module):
            # Two 3x3 same-padding convolutions, each followed by ReLU.

            def __init__(self, c_in, c_out):
                super(DoubleConvSame, self).__init__()
                self.conv = nn.Sequential(
                    nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                )

            def forward(self, x):
                return self.conv(x)

        self.conv1 = DoubleConvSame(c_in=3, c_out=64)
        self.conv2 = DoubleConvSame(c_in=64, c_out=128)
        self.conv3 = DoubleConvSame(c_in=128, c_out=256)
        self.conv4 = DoubleConvSame(c_in=256, c_out=512)
        self.conv5 = DoubleConvSame(c_in=512, c_out=1024)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.up1 = nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=2, stride=2)
        self.up2 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=2, stride=2)
        self.up3 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2)
        self.up4 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=2, stride=2)
        self.up_conv1 = DoubleConvSame(c_in=1024, c_out=512)
        self.up_conv2 = DoubleConvSame(c_in=512, c_out=256)
        self.up_conv3 = DoubleConvSame(c_in=256, c_out=128)
        self.up_conv4 = DoubleConvSame(c_in=128, c_out=64)
        self.conv_1x1 = nn.Conv2d(in_channels=64, out_channels=6, kernel_size=1)  # 6 output classes
        self.loss_fn = nn.CrossEntropyLoss()
        self.train_loss = []
        self.val_loss = []

    def crop_tensor(self, up_tensor, target_tensor):
        _, _, H, W = up_tensor.shape
        x = T.CenterCrop(size=(H, W))(target_tensor)
        return x

    def forward(self, x):
        # ENCODER
        c1 = self.conv1(x)
        p1 = self.pool(c1)
        c2 = self.conv2(p1)
        p2 = self.pool(c2)
        c3 = self.conv3(p2)
        p3 = self.pool(c3)
        c4 = self.conv4(p3)
        p4 = self.pool(c4)
        # BOTTLE-NECK
        c5 = self.conv5(p4)
        # DECODER
        u1 = self.up1(c5)
        crop1 = self.crop_tensor(u1, c4)
        cat1 = torch.cat([u1, crop1], dim=1)
        uc1 = self.up_conv1(cat1)
        u2 = self.up2(uc1)
        crop2 = self.crop_tensor(u2, c3)
        cat2 = torch.cat([u2, crop2], dim=1)
        uc2 = self.up_conv2(cat2)
        u3 = self.up3(uc2)
        crop3 = self.crop_tensor(u3, c2)
        cat3 = torch.cat([u3, crop3], dim=1)
        uc3 = self.up_conv3(cat3)
        u4 = self.up4(uc3)
        crop4 = self.crop_tensor(u4, c1)
        cat4 = torch.cat([u4, crop4], dim=1)
        uc4 = self.up_conv4(cat4)
        outputs = self.conv_1x1(uc4)
        return outputs

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self.forward(x)
        _y = torch.squeeze(y).long()
        loss = self.loss_fn(y_pred, _y)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self.forward(x)
        _y = torch.squeeze(y).long()
        loss = self.loss_fn(y_pred, _y)
        return loss

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self.forward(x)
        _y = torch.squeeze(y).long()
        loss = self.loss_fn(y_pred, _y)
        # NOTE: save_predictions_as_imgs is not defined in this extract;
        # a hedged sketch of it follows this cell.
        save_predictions_as_imgs(x, y, y_pred, counter=batch_idx)
        return loss

    def test_epoch_end(self, outs):
        pass

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=LEARNING_RATE)

train_loader = GolfDataModule(BATCH_SIZE)
trainer = pl.Trainer(max_epochs=50, accelerator='gpu', devices=2, log_every_n_steps=24, strategy='ddp_notebook_find_unused_parameters_false')
model = UNetModel()
trainer.fit(model, train_loader)
trainer = pl.Trainer(devices=1, num_nodes=1, accelerator='gpu')
trainer.test(model, train_loader)
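# --- Editor's addition (hedged sketch; not in the source notebook) ---
# `save_predictions_as_imgs`, called in test_step above, is not defined anywhere
# in this extract. Judging from cell_22, which reads back '<n>_figure.jpg',
# '<n>_groundtruth.jpg' and '<n>_prediction.jpg' from /kaggle/working/, a
# minimal implementation might look like this (it would have to be defined
# before trainer.test runs):
import torchvision

def save_predictions_as_imgs(x, y, y_pred, counter, out_dir='/kaggle/working/'):
    # Collapse the 6-class logits to a per-pixel class index for visualisation.
    preds = torch.argmax(y_pred, dim=1, keepdim=True).float()
    torchvision.utils.save_image(x, f'{out_dir}{counter + 1}_figure.jpg')
    # Crude normalisation so class ids 0..5 span the displayable [0, 1] range.
    torchvision.utils.save_image(y / 5.0, f'{out_dir}{counter + 1}_groundtruth.jpg')
    torchvision.utils.save_image(preds / 5.0, f'{out_dir}{counter + 1}_prediction.jpg')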
code
128043237/cell_22
[ "text_plain_output_1.png" ]
from PIL import Image
from torch.utils.data import random_split, DataLoader, Dataset
from torchvision.io import read_image, ImageReadMode
import matplotlib.pyplot as plt
import os
import torchvision.transforms.functional as TF

BATCH_SIZE = 16
IMAGE_SIZE = (256, 256)
IN_CHANNELS = 3
LEARNING_RATE = 0.0001
IMAGES_DIR = '/kaggle/input/danish-golf-courses-orthophotos/1. orthophotos/'
SEGMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/2. segmentation masks/'
LABELMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/3. class masks/'

# Loading the data
orthophoto_list = os.listdir(IMAGES_DIR)
print("There are ", len(orthophoto_list), " orthophotos in this dataset!")

# Load image with index of 5 (I prefer this image as it shows all the classes)
idx = 5  # The index can be changed to view other orthophotos.
golf_image = Image.open(os.path.join(IMAGES_DIR, orthophoto_list[idx]))
golf_segmask = Image.open(os.path.join(SEGMASKS_DIR, orthophoto_list[idx].replace(".jpg", ".png")))  # The class masks are png instead of jpg

# Plot using matplotlib
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Orthophoto')
axes[1].set_title('Segmentation Mask')
axes[0].imshow(golf_image)
axes[1].imshow(golf_segmask)

class GolfDataset(Dataset):

    def __init__(self, images_dir, labelmasks_dir):
        self.images_dir = images_dir
        self.labelmasks_dir = labelmasks_dir
        self.images_dir_list = os.listdir(images_dir)

    def __len__(self):
        return len(self.images_dir_list)

    def __getitem__(self, idx):
        image_path = os.path.join(self.images_dir, self.images_dir_list[idx])
        image = read_image(image_path, ImageReadMode.RGB)
        label_mask_path = os.path.join(self.labelmasks_dir, self.images_dir_list[idx]).replace('.jpg', '.png')
        label_mask = read_image(label_mask_path, ImageReadMode.GRAY)
        image = TF.resize(image, IMAGE_SIZE)
        image = image.float()
        image = image / 255
        label_mask = TF.resize(label_mask, IMAGE_SIZE)
        label_mask = TF.rgb_to_grayscale(label_mask)
        label_mask = label_mask.float()
        return (image, label_mask)

golf_ds = GolfDataset(IMAGES_DIR, LABELMASKS_DIR)
idx = 5
orthophoto = golf_ds.__getitem__(idx)[0]
label_mask = golf_ds.__getitem__(idx)[1]

output_dir = '/kaggle/working/'
for idx in range(1, 8):
    orthophoto = Image.open(output_dir + str(idx) + '_figure.jpg')
    groundtruth = Image.open(output_dir + str(idx) + '_groundtruth.jpg')
    prediction = Image.open(output_dir + str(idx) + '_prediction.jpg')
    fig, axes = plt.subplots(1, 3)
    fig.set_size_inches(18.5, 15.5)
    axes[0].set_title('Orthophoto')
    axes[1].set_title('Groundtruth')
    axes[2].set_title('Prediction')
    axes[0].imshow(orthophoto)
    axes[1].imshow(groundtruth)
    axes[2].imshow(prediction)
code
128043237/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import os

BATCH_SIZE = 16
IMAGE_SIZE = (256, 256)
IN_CHANNELS = 3
LEARNING_RATE = 0.0001
IMAGES_DIR = '/kaggle/input/danish-golf-courses-orthophotos/1. orthophotos/'
SEGMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/2. segmentation masks/'
LABELMASKS_DIR = '/kaggle/input/danish-golf-courses-orthophotos/3. class masks/'
orthophoto_list = os.listdir(IMAGES_DIR)
print('There are ', len(orthophoto_list), ' orthophotos in this dataset!')
idx = 5
golf_image = Image.open(os.path.join(IMAGES_DIR, orthophoto_list[idx]))
golf_segmask = Image.open(os.path.join(SEGMASKS_DIR, orthophoto_list[idx].replace('.jpg', '.png')))
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Orthophoto')
axes[1].set_title('Segmentation Mask')
axes[0].imshow(golf_image)
axes[1].imshow(golf_segmask)
code