path              stringlengths    13 .. 17
screenshot_names  sequencelengths  1 .. 873
code              stringlengths    0 .. 40.4k
cell_type         stringclasses    1 value
90130223/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
print(data.shape)
data.head()
code
90130223/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
data.head()
code
90130223/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
data['edible'].value_counts(normalize=True)  # normalize takes a bool, not the string 'True'
code
90130223/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
code
90130223/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
total = 0  # avoid shadowing the built-in sum()
for n in data.nunique():
    total += n
total = total - data.shape[1]  # expected number of dummy columns with drop_first=True
data = pd.get_dummies(data, drop_first=True)
print(data.shape)
data.head()
code
90130223/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581

data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
code
105205397/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
x = data.drop(['Species', 'Id'], axis=1)
y = data.Species
model = RandomForestClassifier()
x_train, x_val, y_train, y_val = train_test_split(x, y)
model.fit(x_train, y_train)
pred = model.predict(x_val)
print(classification_report(y_val, pred))
code
105205397/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/iris/Iris.csv')
x = data.drop(['Species', 'Id'], axis=1)
y = data.Species
sns.countplot(y)
code
105205397/cell_19
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
x = data.drop(['Species', 'Id'], axis=1)
y = data.Species
model = RandomForestClassifier()
x_train, x_val, y_train, y_val = train_test_split(x, y)
model.fit(x_train, y_train)
pred = model.predict(x_val)
accuracy_score(y_val, pred)
code
105205397/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105205397/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
data.info()
code
105205397/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
data.describe()
code
105205397/cell_16
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
x = data.drop(['Species', 'Id'], axis=1)
y = data.Species
model = RandomForestClassifier()
x_train, x_val, y_train, y_val = train_test_split(x, y)
model.fit(x_train, y_train)
code
105205397/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/iris/Iris.csv')
x = data.drop(['Species', 'Id'], axis=1)
y = data.Species
sns.heatmap(data.corr(), annot=True)
code
105205397/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/iris/Iris.csv')
data.head()
code
2013560/cell_42
[ "text_html_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata['Title'] = traintestdata.Name.map(lambda name: name.split(',')[1].split('.')[0].strip())
(train.Title.unique(), test.Title.unique())
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty',
             'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer',
             'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss',
             'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
traintestdata['Title'] = traintestdata['Title'].map(title_map)
(train.Title.unique(), test.Title.unique())
code
2013560/cell_21
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

survival_stacked_bar('Pclass')
code
2013560/cell_25
[ "text_html_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

survival_stacked_bar('SibSp')
code
2013560/cell_34
[ "text_html_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
code
2013560/cell_23
[ "text_html_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

survival_stacked_bar('Embarked')
code
2013560/cell_30
[ "text_html_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
code
2013560/cell_44
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata['Title'] = traintestdata.Name.map(lambda name: name.split(',')[1].split('.')[0].strip())
(train.Title.unique(), test.Title.unique())
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty',
             'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer',
             'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss',
             'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
traintestdata['Title'] = traintestdata['Title'].map(title_map)
(train.Title.unique(), test.Title.unique())
for i in train.columns:
    print(i + ': ' + str(sum(train[i].isnull())) + ' missing values')
code
2013560/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
gender_submission.head()
code
2013560/cell_40
[ "text_html_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata['Title'] = traintestdata.Name.map(lambda name: name.split(',')[1].split('.')[0].strip())
(train.Title.unique(), test.Title.unique())
code
2013560/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd

pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)

import seaborn as sns
sns.set(style='whitegrid')

from sklearn.linear_model import LinearRegression
import statsmodels.formula.api as sm
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
code
2013560/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
train.info()
code
2013560/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

survival_stacked_bar('Sex')
code
2013560/cell_45
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata['Title'] = traintestdata.Name.map(lambda name: name.split(',')[1].split('.')[0].strip())
(train.Title.unique(), test.Title.unique())
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty',
             'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer',
             'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss',
             'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
traintestdata['Title'] = traintestdata['Title'].map(title_map)
(train.Title.unique(), test.Title.unique())
for i in test.columns:
    print(i + ': ' + str(sum(test[i].isnull())) + ' missing values')
code
2013560/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
train['Sex'].value_counts().plot(kind='bar')
code
2013560/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
plt.axis('equal')
plt.show()
code
2013560/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
embark_map = {'S': 1, 'C': 2, 'Q': 3}
traintestdata['Embarked'] = traintestdata['Embarked'].map(embark_map)
survival_stacked_bar('Embarked')
code
2013560/cell_47
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata['Title'] = traintestdata.Name.map(lambda name: name.split(',')[1].split('.')[0].strip())
(train.Title.unique(), test.Title.unique())
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty',
             'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer',
             'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss',
             'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
traintestdata['Title'] = traintestdata['Title'].map(title_map)
(train.Title.unique(), test.Title.unique())
train_set_1 = train.groupby(['Sex', 'Title', 'Pclass', 'Parch'])
train_set_1_median = train_set_1.median()
train_set_1
code
2013560/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
train['Age'].hist(width=6)
code
2013560/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
code
2013560/cell_27
[ "text_html_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

survival_stacked_bar('Parch')
code
2013560/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
test.info()
code
2013560/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
train.head()
code
2013560/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')

def survival_stacked_bar(variable):
    # Per-group proportions of each category among non-survivors and survivors.
    Died = train[train['Survived'] == 0][variable].value_counts() / len(train[train['Survived'] == 0])
    Survived = train[train['Survived'] == 1][variable].value_counts() / len(train[train['Survived'] == 1])
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survive', 'Survived']
    data.plot(kind='bar', stacked=True)
    return data

f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
sex_map = {'male': 1, 'female': 0}
traintestdata['Sex'] = traintestdata['Sex'].map(sex_map)
survival_stacked_bar('Sex')
code
2022050/cell_13
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['Right Troop ' + str(i + 1) for i in range(8)],
                                ['Right Troop Count ' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['Left Troop ' + str(i + 1) for i in range(8)],
                               ['Left Troop Count ' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                               for x in df_cr['Right Deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                              for x in df_cr['Left Deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data.head()
code
2022050/cell_9
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
LD = [len(left_deck) for left_deck in df_cr['Left Deck']]
RD = [len(right_deck) for right_deck in df_cr['Right Deck']]
(set(LD), set(RD))
code
2022050/cell_23
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['Right Troop ' + str(i + 1) for i in range(8)],
                                ['Right Troop Count ' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['Left Troop ' + str(i + 1) for i in range(8)],
                               ['Left Troop Count ' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                               for x in df_cr['Right Deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                              for x in df_cr['Left Deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data[(finalCR_data['Battle Result'] == 'Right') & (finalCR_data['Right Stars Won'] == '3')][
    ['Right Troop 1', 'Right Troop 2', 'Right Troop 3', 'Right Troop 4',
     'Right Troop 5', 'Right Troop 6', 'Right Troop 7', 'Right Troop 8', 'Result']
].groupby(['Right Troop 1', 'Right Troop 2', 'Right Troop 3', 'Right Troop 4',
           'Right Troop 5', 'Right Troop 6', 'Right Troop 7', 'Right Troop 8']
).count().sort_values(by='Result', ascending=False).head(5)
code
2022050/cell_20
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['Right Troop ' + str(i + 1) for i in range(8)],
                                ['Right Troop Count ' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['Left Troop ' + str(i + 1) for i in range(8)],
                               ['Left Troop Count ' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                               for x in df_cr['Right Deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                              for x in df_cr['Left Deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data[['Result', 'Battle Result']].groupby('Battle Result').count().apply(lambda x: x / x.sum() * 100)
code
2022050/cell_11
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
code
2022050/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
2022050/cell_7
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
df_cr.head()
code
2022050/cell_16
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['Right Troop ' + str(i + 1) for i in range(8)],
                                ['Right Troop Count ' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['Left Troop ' + str(i + 1) for i in range(8)],
                               ['Left Troop Count ' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                               for x in df_cr['Right Deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                              for x in df_cr['Left Deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data.head()
code
2022050/cell_22
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.columns = ['Left Clan', 'Left Deck', 'Left Player', 'Left Trophy', 'Right Clan', 'Right Deck',
                 'Right Player', 'right Trophy', 'Result', 'Time', 'Type']
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['Left Deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['Right Deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['Right Troop ' + str(i + 1) for i in range(8)],
                                ['Right Troop Count ' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['Left Troop ' + str(i + 1) for i in range(8)],
                               ['Left Troop Count ' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                               for x in df_cr['Right Deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]])
                              for x in df_cr['Left Deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data[(finalCR_data['Battle Result'] == 'Left') & (finalCR_data['Left Stars Won'] == '3')][
    ['Left Troop 1', 'Left Troop 2', 'Left Troop 3', 'Left Troop 4',
     'Left Troop 5', 'Left Troop 6', 'Left Troop 7', 'Left Troop 8', 'Result']
].groupby(['Left Troop 1', 'Left Troop 2', 'Left Troop 3', 'Left Troop 4',
           'Left Troop 5', 'Left Troop 6', 'Left Troop 7', 'Left Troop 8']
).count().sort_values(by='Result', ascending=False)
code
2022050/cell_5
[ "text_html_output_1.png" ]
with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
len(CR)
code
34137894/cell_21
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if i not in num_to_cat_cols]
num_cols = [i for i in num_cols if i not in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
print('There are %d Num, %d Cat, %d Num-Cat columns.' % (len(num_cols), len(cat_cols), len(num_to_cat_cols)))
code
34137894/cell_9
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_28.png", "image_output_23.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_27.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "image_output_3.png", "image_output_29.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_26.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test_data.shape
code
34137894/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10, 5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if i not in num_to_cat_cols]
num_cols = [i for i in num_cols if i not in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:, i].dropna(), rug=False, hist=False, kde_kws={'bw': 0.1})
    plt.xlabel(num_data.columns[i])
code
34137894/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
code
34137894/cell_18
[ "text_plain_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10, 5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
stats.probplot(np.log1p(train_data['SalePrice']), plot=plt)
code
34137894/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
train_data.info()
code
34137894/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10, 5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
code
34137894/cell_14
[ "text_plain_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
stats.probplot(train_data['SalePrice'], plot=plt)
code
34137894/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test_data.shape
test_data.info()
code
34137894/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
code
34137894/cell_5
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.head()
code
128047114/cell_20
[ "text_plain_output_1.png" ]
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import os
import json
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
with open(meta, 'r') as f:
    metadata = json.load(f)
horror_ids = []
drama_ids = []
for id in tqdm(ids):
    metadata_file = f'{ds_path}{id}.json'
    with open(metadata_file, 'r') as f:
        metadata = json.load(f)
    genres = metadata['genres']
    if 'Horror' in genres and 'Drama' not in genres:
        horror_ids.append(id)
    if 'Drama' in genres and 'Horror' not in genres:
        drama_ids.append(id)
horror_ids = horror_ids[:2000]
drama_ids = drama_ids[:2000]
ids = horror_ids + drama_ids
labels = [0] * len(horror_ids) + [1] * len(drama_ids)

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, image_folder, ids, labels, transform=None):
        self.image_folder = image_folder
        self.ids = ids
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        image_path = self.image_folder + self.ids[idx] + '.jpeg'
        image = Image.open(image_path).convert('RGB')
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        return (image, label)

X_train, X_valid, y_train, y_valid = train_test_split(ids, labels, test_size=0.4, random_state=42, stratify=labels)
X_valid, X_test, y_valid, y_test = train_test_split(X_valid, y_valid, test_size=0.5, random_state=42, stratify=y_valid)
img_height, img_width = (512, 512)
transform = T.Compose([T.ToTensor(),
                       T.Resize((img_height, img_width)),
                       T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_ds = ImageDataset(ds_path, X_train, y_train, transform=transform)
valid_ds = ImageDataset(ds_path, X_valid, y_valid, transform=transform)
test_ds = ImageDataset(ds_path, X_test, y_test, transform=transform)
batch_size = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
loader_args = dict(batch_size=batch_size, num_workers=os.cpu_count(), pin_memory=True, shuffle=True)
train_dl = DataLoader(train_ds, **loader_args)
valid_dl = DataLoader(valid_ds, **loader_args)
test_dl = DataLoader(test_ds, **loader_args)
model = torchvision.models.resnet50(weights=torchvision.models.resnet.ResNet50_Weights.IMAGENET1K_V1)
model
for param in model.parameters():
    param.requires_grad = False  # freeze the pretrained backbone
model.fc = torch.nn.Linear(model.fc.in_features, 2)
loss_fn = torch.nn.CrossEntropyLoss()
model.to(device)
params_to_update = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params_to_update, lr=0.0003)
epochs = 7
train_losses = []
valid_losses = []
for epoch in range(epochs):
    running_loss = 0.0
    running_corrects = 0
    model.train()
    with tqdm(total=len(train_ds), desc=f'Train epoch {epoch}/{epochs}', unit='img') as pb:
        for X, y in train_dl:
            X, y = (X.to(device), y.to(device))
            pred = model(X)
            loss = loss_fn(pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * X.size(0)
            running_corrects += torch.sum(pred.argmax(1) == y.data)
            pb.update(X.shape[0])
            pb.set_postfix(**{'loss (batch)': loss.item()})
    epoch_loss = running_loss / len(train_ds)
    train_losses.append(epoch_loss)
    epoch_acc = running_corrects / len(train_ds)
    running_loss = 0.0
    model.eval()
    with torch.no_grad():
        corrects = 0
        with tqdm(total=len(valid_ds), desc=f'Validation epoch {epoch}/{epochs - 1}', unit='img') as pb:
            for X, y in valid_dl:
                X, y = (X.to(device), y.to(device))
                outputs = model(X)
                _, predicted = torch.max(outputs, 1)
                loss = loss_fn(outputs, y)
                running_loss += loss.item() * X.size(0)
                corrects += (predicted == y).sum().item()
                pb.update(X.shape[0])
        epoch_loss = running_loss / len(valid_ds)
        accuracy = corrects / len(valid_ds)
        valid_losses.append(epoch_loss)
plt.plot(range(epochs), train_losses, label='train')
plt.plot(range(epochs), valid_losses, label='valid')
plt.title('Train and validation loss')
plt.legend()
plt.show()
code
128047114/cell_6
[ "image_output_1.png" ]
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import os
import json
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
with open(meta, 'r') as f:
    metadata = json.load(f)
print('Genres:', metadata['genres'])
code
128047114/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os

ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
print('Some image ids:', ids[:5])
code
128047114/cell_19
[ "text_plain_output_1.png" ]
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import os
import json
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
with open(meta, 'r') as f:
    metadata = json.load(f)
horror_ids = []
drama_ids = []
for id in tqdm(ids):
    metadata_file = f'{ds_path}{id}.json'
    with open(metadata_file, 'r') as f:
        metadata = json.load(f)
    genres = metadata['genres']
    if 'Horror' in genres and 'Drama' not in genres:
        horror_ids.append(id)
    if 'Drama' in genres and 'Horror' not in genres:
        drama_ids.append(id)
horror_ids = horror_ids[:2000]
drama_ids = drama_ids[:2000]
ids = horror_ids + drama_ids
labels = [0] * len(horror_ids) + [1] * len(drama_ids)

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, image_folder, ids, labels, transform=None):
        self.image_folder = image_folder
        self.ids = ids
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        image_path = self.image_folder + self.ids[idx] + '.jpeg'
        image = Image.open(image_path).convert('RGB')
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        return (image, label)

X_train, X_valid, y_train, y_valid = train_test_split(ids, labels, test_size=0.4, random_state=42, stratify=labels)
X_valid, X_test, y_valid, y_test = train_test_split(X_valid, y_valid, test_size=0.5, random_state=42, stratify=y_valid)
img_height, img_width = (512, 512)
transform = T.Compose([T.ToTensor(),
                       T.Resize((img_height, img_width)),
                       T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_ds = ImageDataset(ds_path, X_train, y_train, transform=transform)
valid_ds = ImageDataset(ds_path, X_valid, y_valid, transform=transform)
test_ds = ImageDataset(ds_path, X_test, y_test, transform=transform)
batch_size = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
loader_args = dict(batch_size=batch_size, num_workers=os.cpu_count(), pin_memory=True, shuffle=True)
train_dl = DataLoader(train_ds, **loader_args)
valid_dl = DataLoader(valid_ds, **loader_args)
test_dl = DataLoader(test_ds, **loader_args)
model = torchvision.models.resnet50(weights=torchvision.models.resnet.ResNet50_Weights.IMAGENET1K_V1)
model
for param in model.parameters():
    param.requires_grad = False  # freeze the pretrained backbone
model.fc = torch.nn.Linear(model.fc.in_features, 2)
loss_fn = torch.nn.CrossEntropyLoss()
model.to(device)
params_to_update = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params_to_update, lr=0.0003)
epochs = 7
train_losses = []
valid_losses = []
for epoch in range(epochs):
    running_loss = 0.0
    running_corrects = 0
    model.train()
    with tqdm(total=len(train_ds), desc=f'Train epoch {epoch}/{epochs}', unit='img') as pb:
        for X, y in train_dl:
            X, y = (X.to(device), y.to(device))
            pred = model(X)
            loss = loss_fn(pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * X.size(0)
            running_corrects += torch.sum(pred.argmax(1) == y.data)
            pb.update(X.shape[0])
            pb.set_postfix(**{'loss (batch)': loss.item()})
    epoch_loss = running_loss / len(train_ds)
    train_losses.append(epoch_loss)
    epoch_acc = running_corrects / len(train_ds)
    print(f'Train Loss: {epoch_loss:.4f} Accuracy: {epoch_acc:.4f}')
    running_loss = 0.0
    model.eval()
    with torch.no_grad():
        corrects = 0
        with tqdm(total=len(valid_ds), desc=f'Validation epoch {epoch}/{epochs - 1}', unit='img') as pb:
            for X, y in valid_dl:
                X, y = (X.to(device), y.to(device))
                outputs = model(X)
                _, predicted = torch.max(outputs, 1)
                loss = loss_fn(outputs, y)
                running_loss += loss.item() * X.size(0)
                corrects += (predicted == y).sum().item()
                pb.update(X.shape[0])
        epoch_loss = running_loss / len(valid_ds)
        accuracy = corrects / len(valid_ds)
        valid_losses.append(epoch_loss)
        print(f'Validation Accuracy: {accuracy:.4f}')
code
128047114/cell_8
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import os
import json
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
with open(meta, 'r') as f:
    metadata = json.load(f)
horror_ids = []
drama_ids = []
for id in tqdm(ids):
    metadata_file = f'{ds_path}{id}.json'
    with open(metadata_file, 'r') as f:
        metadata = json.load(f)
    genres = metadata['genres']
    if 'Horror' in genres and 'Drama' not in genres:
        horror_ids.append(id)
    if 'Drama' in genres and 'Horror' not in genres:
        drama_ids.append(id)
code
128047114/cell_16
[ "image_output_1.png" ]
import torchvision

model = torchvision.models.resnet50(weights=torchvision.models.resnet.ResNet50_Weights.IMAGENET1K_V1)
model
code
128047114/cell_14
[ "text_plain_output_1.png" ]
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import os
import json
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
with open(meta, 'r') as f:
    metadata = json.load(f)
horror_ids = []
drama_ids = []
for id in tqdm(ids):
    metadata_file = f'{ds_path}{id}.json'
    with open(metadata_file, 'r') as f:
        metadata = json.load(f)
    genres = metadata['genres']
    if 'Horror' in genres and 'Drama' not in genres:
        horror_ids.append(id)
    if 'Drama' in genres and 'Horror' not in genres:
        drama_ids.append(id)
horror_ids = horror_ids[:2000]
drama_ids = drama_ids[:2000]
ids = horror_ids + drama_ids
labels = [0] * len(horror_ids) + [1] * len(drama_ids)

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, image_folder, ids, labels, transform=None):
        self.image_folder = image_folder
        self.ids = ids
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        image_path = self.image_folder + self.ids[idx] + '.jpeg'
        image = Image.open(image_path).convert('RGB')
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        return (image, label)

X_train, X_valid, y_train, y_valid = train_test_split(ids, labels, test_size=0.4, random_state=42, stratify=labels)
X_valid, X_test, y_valid, y_test = train_test_split(X_valid, y_valid, test_size=0.5, random_state=42, stratify=y_valid)
img_height, img_width = (512, 512)
transform = T.Compose([T.ToTensor(),
                       T.Resize((img_height, img_width)),
                       T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_ds = ImageDataset(ds_path, X_train, y_train, transform=transform)
valid_ds = ImageDataset(ds_path, X_valid, y_valid, transform=transform)
test_ds = ImageDataset(ds_path, X_test, y_test, transform=transform)
batch_size = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
loader_args = dict(batch_size=batch_size, num_workers=os.cpu_count(), pin_memory=True, shuffle=True)
train_dl = DataLoader(train_ds, **loader_args)
valid_dl = DataLoader(valid_ds, **loader_args)
test_dl = DataLoader(test_ds, **loader_args)
print('Sizes of the datasets: ', len(train_ds), len(valid_ds), len(test_ds))
code
128047114/cell_5
[ "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_20.png", "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_23.png", "text_plain_output_18.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_22.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_26.png", "text_plain_output_28.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_21.png" ]
from PIL import Image
import os
import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as T
from torchvision.io import read_image, ImageReadMode
from sklearn.model_selection import train_test_split
import json
from tqdm import tqdm
import matplotlib.pyplot as plt
import random

Image.MAX_IMAGE_PIXELS = None
ds_path = '/kaggle/input/raw-mmimdb/mmimdb/dataset/'
ids = list(set([x.split('.')[0] for x in os.listdir(ds_path)]))
img = f'{ds_path}{ids[0]}.jpeg'
meta = f'{ds_path}{ids[0]}.json'
Image.open(img)
code
73088112/cell_21
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
test.select_dtypes(exclude=[np.number]).describe()
code
73088112/cell_25
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()

def missing_values_table(df):
    mis_val = df.isnull().sum()
    mis_val_percent = 100 * df.isnull().sum() / len(df)
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
    mis_val_table_ren_columns = mis_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    mis_val_table_ren_columns = mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return mis_val_table_ren_columns

missing_values_table(train)
code
73088112/cell_34
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
train.Price.describe()
code
73088112/cell_23
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
display(train[train.BuildingArea == 0].head(5))
display(train[train.BuildingArea == 0].shape)
code
73088112/cell_20
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
train.select_dtypes(exclude=[np.number]).describe()
code
73088112/cell_29
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
train.Price.describe()
code
73088112/cell_39
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
sns.distplot(train['Price'], fit=norm)
fig = plt.figure()
res = stats.probplot(train['Price'], plot=plt)
print('Skew is:', train.Price.skew())
sns.distplot(np.log1p(train['Price']), fit=norm)
fig = plt.figure()
res = stats.probplot(np.log1p(train['Price']), plot=plt)
print('Skew is:', np.log1p(train.Price).skew())
code
73088112/cell_26
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()

def missing_values_table(df):
    mis_val = df.isnull().sum()
    mis_val_percent = 100 * df.isnull().sum() / len(df)
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
    mis_val_table_ren_columns = mis_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    mis_val_table_ren_columns = mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return mis_val_table_ren_columns

missing_values_table(train)
missing_values_table(test)
code
73088112/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
print(duplicate_row)
code
73088112/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
test.describe()
code
73088112/cell_7
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
train.head(5)
code
73088112/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
train.describe()
code
73088112/cell_8
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
print(f'Training set size: {train.shape}')
print(f'Test set size: {test.shape}')
code
73088112/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
code
73088112/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
sns.distplot(train['Price'], fit=norm)
fig = plt.figure()
res = stats.probplot(train['Price'], plot=plt)
print('Skew is:', train.Price.skew())
code
73088112/cell_31
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
sns.distplot(train['Price'])
code
73088112/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.info()
code
73088112/cell_36
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/massp-housing-prices-in-melbourne/train.csv', index_col='id')
test = pd.read_csv('../input/massp-housing-prices-in-melbourne/test.csv', index_col='id')
duplicate_row = train[train.duplicated()]
train.dtypes.value_counts()
sns.distplot(train['Price'], fit=norm)
fig = plt.figure()
res = stats.probplot(train['Price'], plot=plt)
print('Skew is:', train.Price.skew())
sns.boxplot(train['Price'], orient='v')
code
128014397/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
autoencoder.fit(X_train, X_train, epochs=100, batch_size=32, shuffle=True, validation_data=(X_test, X_test), verbose=0)
code
128014397/cell_20
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
from pathlib import Path
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
import pandas as pd

INPUT_DIR = Path('/kaggle/input/playground-series-s3e13/')
train_data = pd.read_csv(INPUT_DIR / 'train.csv')
train_data['data_type'] = 0
train_data['prognosis'] = train_data['prognosis'].str.replace(' ', '_')
test_data = pd.read_csv(INPUT_DIR / 'test.csv')
test_data['data_type'] = 0
features = sorted(list(set(test_data.columns) - set(['id', 'data_type'])))
if INCLUDE_ORIGINAL:
    df_original = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
    df_original['id'] = -1 - np.arange(len(df_original))
    df_original['prognosis'] = df_original['prognosis'].str.replace(' ', '_')
    df_original['data_type'] = 1
    train_data = pd.concat([df_original, train_data]).reset_index(drop=True)
    features = sorted(list(set(test_data.columns) - set(['id'])))
enc = OrdinalEncoder()
y_enc = enc.fit_transform(train_data.filter(['prognosis']))
y = to_categorical(y_enc)
prognosis_classes = list(enc.categories_[0])
N_CLASSES = len(prognosis_classes)
feats = list(features)
X = train_data.filter(feats).values
X_train = train_data.drop('data_type', axis=1).filter(feats).values
X_data_type = train_data['data_type'].values
X_test = test_data.drop('data_type', axis=1).filter(feats).values
X_tst = test_data.filter(feats).values
n_components = 5
decomp = Decomp(n_components=n_components, method='umap', scaler_method=None)
umap_train = decomp.dimension_reduction(X_train)
umap_test = decomp.transform(X_test)
n_components = 7
decomp = Decomp(n_components=n_components, method='pca', scaler_method=None)
pca_train = decomp.dimension_reduction(X_train)
pca_test = decomp.transform(X_test)
n_components = 8
decomp = Decomp(n_components=n_components, method='LDA', scaler_method=None)
lda_train = decomp.dimension_reduction(X_train, y_enc.flatten())
lda_test = decomp.transform(X_test)
print(f' --> lda(n_components={n_components})')
code
128014397/cell_26
[ "text_plain_output_1.png" ]
from colorama import Fore, Back, Style
from keras.utils import to_categorical
from pathlib import Path
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, QuantileTransformer, RobustScaler
from sklearn.preprocessing import OrdinalEncoder
from tensorflow import keras
from tensorflow.keras import layers, models, Sequential
from tensorflow.keras import backend as K
from umap import UMAP
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_ranking as tfr

INPUT_DIR = Path('/kaggle/input/playground-series-s3e13/')
train_data = pd.read_csv(INPUT_DIR / 'train.csv')
train_data['data_type'] = 0
train_data['prognosis'] = train_data['prognosis'].str.replace(' ', '_')
test_data = pd.read_csv(INPUT_DIR / 'test.csv')
test_data['data_type'] = 0
features = sorted(list(set(test_data.columns) - set(['id', 'data_type'])))
if INCLUDE_ORIGINAL:
    df_original = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
    df_original['id'] = -1 - np.arange(len(df_original))
    df_original['prognosis'] = df_original['prognosis'].str.replace(' ', '_')
    df_original['data_type'] = 1
    train_data = pd.concat([df_original, train_data]).reset_index(drop=True)
    features = sorted(list(set(test_data.columns) - set(['id'])))
enc = OrdinalEncoder()
y_enc = enc.fit_transform(train_data.filter(['prognosis']))
y = to_categorical(y_enc)
prognosis_classes = list(enc.categories_[0])
N_CLASSES = len(prognosis_classes)
feats = list(features)
X = train_data.filter(feats).values
X_train = train_data.drop('data_type', axis=1).filter(feats).values
X_data_type = train_data['data_type'].values
X_test = test_data.drop('data_type', axis=1).filter(feats).values
X_tst = test_data.filter(feats).values

# Denoising autoencoder: compress the 64 binary symptom flags into a 12-dim embedding.
encoder_dim = 12
dim_input = layers.Input(shape=(64,))
encoded_layer_0 = tf.keras.layers.GaussianNoise(0.1)(dim_input)
encoded_layer_1 = layers.Dense(60, activation='relu')(encoded_layer_0)
encoded_layer_2 = layers.Dense(50, activation='relu')(encoded_layer_1)
encoded_layer_3 = layers.Dense(40, activation='relu')(encoded_layer_2)
encoded_layer_4 = layers.Dense(30, activation='relu')(encoded_layer_3)
encoded_layer_5 = layers.Dense(20, activation='relu')(encoded_layer_4)
encoded_layer_6 = layers.Dense(encoder_dim, activation='softmax')(encoded_layer_5)
decoded_layer_1 = layers.Dense(20, activation='relu')(encoded_layer_6)
decoded_layer_2 = layers.Dense(30, activation='relu')(decoded_layer_1)
decoded_layer_3 = layers.Dense(40, activation='relu')(decoded_layer_2)
decoded_layer_4 = layers.Dense(50, activation='relu')(decoded_layer_3)
decoded_layer_5 = layers.Dense(60, activation='relu')(decoded_layer_4)
decoded_layer_6 = layers.Dense(64, activation='relu')(decoded_layer_5)
autoencoder = keras.Model(inputs=dim_input, outputs=decoded_layer_6)
autoencoder.compile(loss='categorical_crossentropy', metrics='categorical_accuracy')
encoder = keras.Model(inputs=dim_input, outputs=encoded_layer_6)
encoded_input = layers.Input(shape=(encoder_dim,))
encoded_train = pd.DataFrame(encoder.predict(X_train))
encoded_train = encoded_train.add_prefix('feature_')
encoded_test = pd.DataFrame(encoder.predict(X_test))
encoded_test = encoded_test.add_prefix('feature_')

class Decomp:
    def __init__(self, n_components, method='pca', scaler_method='standard'):
        self.n_components = n_components
        self.method = method
        self.scaler_method = scaler_method

    def dimension_reduction(self, df, y=None):
        if self.method == 'LDA':
            X_reduced = self.dimension_method(df, y)
        else:
            X_reduced = self.dimension_method(df)
        df_comp = pd.DataFrame(X_reduced, columns=[f'{self.method.upper()}_{_}' for _ in range(self.n_components)])
        return df_comp

    def dimension_method(self, df, y=None):
        X = self.scaler(df)
        if self.method == 'pca':
            comp = PCA(n_components=self.n_components, random_state=0)
            X_reduced = comp.fit_transform(X)
        elif self.method == 'nmf':
            comp = NMF(n_components=self.n_components, random_state=0)
            X_reduced = comp.fit_transform(X)
        elif self.method == 'umap':
            comp = UMAP(n_components=self.n_components, random_state=0)
            X_reduced = comp.fit_transform(X)
        elif self.method == 'tsne':
            comp = TSNE(n_components=self.n_components, random_state=0)
            X_reduced = comp.fit_transform(X)
        elif self.method == 'LDA':
            comp = LinearDiscriminantAnalysis(n_components=self.n_components)
            X_reduced = comp.fit_transform(X, y)
        else:
            raise ValueError(f'Invalid method name: {self.method}')
        self.comp = comp
        return X_reduced

    def scaler(self, df):
        _df = df.copy()
        if self.scaler_method == 'standard':
            return StandardScaler().fit_transform(_df)
        elif self.scaler_method == 'minmax':
            return MinMaxScaler().fit_transform(_df)
        elif self.scaler_method is None:
            return _df
        else:
            raise ValueError('Invalid scaler_method name')

    def get_columns(self):
        return [f'{self.method.upper()}_{_}' for _ in range(self.n_components)]

    def transform(self, df, y=None):
        X = self.scaler(df)
        X_reduced = self.comp.transform(X)
        df_comp = pd.DataFrame(X_reduced, columns=[f'{self.method.upper()}_{_}' for _ in range(self.n_components)])
        return df_comp

    @property
    def get_explained_variance_ratio(self):
        return np.sum(self.comp.explained_variance_ratio_)

n_components = 5
decomp = Decomp(n_components=n_components, method='umap', scaler_method=None)
umap_train = decomp.dimension_reduction(X_train)
umap_test = decomp.transform(X_test)
n_components = 7
decomp = Decomp(n_components=n_components, method='pca', scaler_method=None)
pca_train = decomp.dimension_reduction(X_train)
pca_test = decomp.transform(X_test)
n_components = 8
decomp = Decomp(n_components=n_components, method='LDA', scaler_method=None)
lda_train = decomp.dimension_reduction(X_train, y_enc.flatten())
lda_test = decomp.transform(X_test)
new_all_data = pd.concat([train_data, umap_train, lda_train], axis=1)
new_test = pd.concat([test_data, umap_test, lda_test], axis=1)

def build_model(input_shape):
    def mish(x):
        return keras.layers.Lambda(lambda x: x * K.tanh(K.softplus(x)))(x)
    inputs = layers.Input(shape=input_shape)
    x = layers.Dropout(0.2)(inputs)
    x = layers.Dense(64, activation=mish)(x)
    for _ in range(1):
        x = layers.BatchNormalization()(x)
        x = layers.Dense(32, activation=mish)(x)
        x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(N_CLASSES, activation='softmax')(x)
    return keras.Model(inputs, outputs)

epochs = 100
lr_start = 0.002
lr_end = 1e-07

def cosine_decay(epoch):
    if epochs > 1:
        w = (1 + np.cos(epoch / (epochs - 1) * np.pi)) / 2
    else:
        w = 1
    return w * lr_start + (1 - w) * lr_end

lr = keras.callbacks.LearningRateScheduler(cosine_decay, verbose=0)
callbacks = [lr, keras.callbacks.TerminateOnNaN()]

def map3_from_logloss(y_enc, preds, data_type):
    return map3(y_enc[data_type == 0], preds[data_type == 0])

def fold_logloss(y_enc, preds, data_type):
    return log_loss(y_enc[data_type == 0], preds[data_type == 0])

N_FOLDS = 10
N_REPEATS = 10
features = sorted(list(set(new_test.columns) - set(['id', 'data_type'])))
feats = list(features)
X = new_all_data.filter(feats).values.astype(np.float32)
X_data_type = new_all_data['data_type'].values.astype(np.float32)
X_test = new_test.filter(feats).values.astype(np.float32)
oof_preds = np.zeros((len(train_data), N_CLASSES))
test_preds = np.zeros((len(test_data), N_CLASSES))
oof_metrics = []
oof_lls = []
for i in range(N_REPEATS):
    k_fold = StratifiedKFold(n_splits=N_FOLDS, random_state=RANDOM_STATE + i, shuffle=True)
    for train_index, test_index in k_fold.split(X, y_enc.flatten()):
        X_train, X_valid = (X[train_index], X[test_index])
        y_train, y_valid = (y[train_index], y[test_index])
        model = build_model(input_shape=(len(feats),))
        model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=[tfr.keras.metrics.MeanAveragePrecisionMetric(topn=3)])
        history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=32, epochs=epochs, callbacks=callbacks, verbose=0)
        oof_pred = model.predict(X_valid, verbose=0)
        test_pred = model.predict(X_test, verbose=0)
        oof_metric = map3_from_logloss(y_valid, oof_pred, X_data_type[test_index])
        oof_ll = fold_logloss(y_valid, oof_pred, X_data_type[test_index])
        oof_metrics.append(oof_metric)
        oof_lls.append(oof_ll)
        oof_preds[test_index] += oof_pred / N_REPEATS
        test_preds += test_pred / (N_REPEATS * N_FOLDS)
oof_metric = np.round(np.mean(oof_metrics), 5)
oof_ll = np.round(np.mean(oof_lls), 5)
print(f'{Fore.GREEN}{Style.BRIGHT}Average metric = {round(oof_metric, 5)}{Style.RESET_ALL}')
code
128014397/cell_19
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
from pathlib import Path
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
import pandas as pd

INPUT_DIR = Path('/kaggle/input/playground-series-s3e13/')
train_data = pd.read_csv(INPUT_DIR / 'train.csv')
train_data['data_type'] = 0
train_data['prognosis'] = train_data['prognosis'].str.replace(' ', '_')
test_data = pd.read_csv(INPUT_DIR / 'test.csv')
test_data['data_type'] = 0
features = sorted(list(set(test_data.columns) - set(['id', 'data_type'])))
if INCLUDE_ORIGINAL:
    df_original = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
    df_original['id'] = -1 - np.arange(len(df_original))
    df_original['prognosis'] = df_original['prognosis'].str.replace(' ', '_')
    df_original['data_type'] = 1
    train_data = pd.concat([df_original, train_data]).reset_index(drop=True)
    features = sorted(list(set(test_data.columns) - set(['id'])))
enc = OrdinalEncoder()
y_enc = enc.fit_transform(train_data.filter(['prognosis']))
y = to_categorical(y_enc)
prognosis_classes = list(enc.categories_[0])
N_CLASSES = len(prognosis_classes)
feats = list(features)
X = train_data.filter(feats).values
X_train = train_data.drop('data_type', axis=1).filter(feats).values
X_data_type = train_data['data_type'].values
X_test = test_data.drop('data_type', axis=1).filter(feats).values
X_tst = test_data.filter(feats).values
n_components = 5
decomp = Decomp(n_components=n_components, method='umap', scaler_method=None)
umap_train = decomp.dimension_reduction(X_train)
umap_test = decomp.transform(X_test)
n_components = 7
decomp = Decomp(n_components=n_components, method='pca', scaler_method=None)
pca_train = decomp.dimension_reduction(X_train)
pca_test = decomp.transform(X_test)
print(f' --> pca(n_components={n_components})')
code
128014397/cell_18
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
from pathlib import Path
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
import pandas as pd

INPUT_DIR = Path('/kaggle/input/playground-series-s3e13/')
train_data = pd.read_csv(INPUT_DIR / 'train.csv')
train_data['data_type'] = 0
train_data['prognosis'] = train_data['prognosis'].str.replace(' ', '_')
test_data = pd.read_csv(INPUT_DIR / 'test.csv')
test_data['data_type'] = 0
features = sorted(list(set(test_data.columns) - set(['id', 'data_type'])))
if INCLUDE_ORIGINAL:
    df_original = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
    df_original['id'] = -1 - np.arange(len(df_original))
    df_original['prognosis'] = df_original['prognosis'].str.replace(' ', '_')
    df_original['data_type'] = 1
    train_data = pd.concat([df_original, train_data]).reset_index(drop=True)
    features = sorted(list(set(test_data.columns) - set(['id'])))
enc = OrdinalEncoder()
y_enc = enc.fit_transform(train_data.filter(['prognosis']))
y = to_categorical(y_enc)
prognosis_classes = list(enc.categories_[0])
N_CLASSES = len(prognosis_classes)
feats = list(features)
X = train_data.filter(feats).values
X_train = train_data.drop('data_type', axis=1).filter(feats).values
X_data_type = train_data['data_type'].values
X_test = test_data.drop('data_type', axis=1).filter(feats).values
X_tst = test_data.filter(feats).values
n_components = 5
decomp = Decomp(n_components=n_components, method='umap', scaler_method=None)
umap_train = decomp.dimension_reduction(X_train)
umap_test = decomp.transform(X_test)
print(f' --> UMAP(n_components={n_components})')
code
128014397/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.utils import to_categorical
from pathlib import Path
from sklearn.preprocessing import OrdinalEncoder
from tensorflow import keras
from tensorflow.keras import layers, models, Sequential
import numpy as np
import pandas as pd
import tensorflow as tf

INPUT_DIR = Path('/kaggle/input/playground-series-s3e13/')
train_data = pd.read_csv(INPUT_DIR / 'train.csv')
train_data['data_type'] = 0
train_data['prognosis'] = train_data['prognosis'].str.replace(' ', '_')
test_data = pd.read_csv(INPUT_DIR / 'test.csv')
test_data['data_type'] = 0
features = sorted(list(set(test_data.columns) - set(['id', 'data_type'])))
if INCLUDE_ORIGINAL:
    df_original = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
    df_original['id'] = -1 - np.arange(len(df_original))
    df_original['prognosis'] = df_original['prognosis'].str.replace(' ', '_')
    df_original['data_type'] = 1
    train_data = pd.concat([df_original, train_data]).reset_index(drop=True)
    features = sorted(list(set(test_data.columns) - set(['id'])))
enc = OrdinalEncoder()
y_enc = enc.fit_transform(train_data.filter(['prognosis']))
y = to_categorical(y_enc)
prognosis_classes = list(enc.categories_[0])
N_CLASSES = len(prognosis_classes)
feats = list(features)
X = train_data.filter(feats).values
X_train = train_data.drop('data_type', axis=1).filter(feats).values
X_data_type = train_data['data_type'].values
X_test = test_data.drop('data_type', axis=1).filter(feats).values
X_tst = test_data.filter(feats).values
encoder_dim = 12
dim_input = layers.Input(shape=(64,))
encoded_layer_0 = tf.keras.layers.GaussianNoise(0.1)(dim_input)
encoded_layer_1 = layers.Dense(60, activation='relu')(encoded_layer_0)
encoded_layer_2 = layers.Dense(50, activation='relu')(encoded_layer_1)
encoded_layer_3 = layers.Dense(40, activation='relu')(encoded_layer_2)
encoded_layer_4 = layers.Dense(30, activation='relu')(encoded_layer_3)
encoded_layer_5 = layers.Dense(20, activation='relu')(encoded_layer_4)
encoded_layer_6 = layers.Dense(encoder_dim, activation='softmax')(encoded_layer_5)
decoded_layer_1 = layers.Dense(20, activation='relu')(encoded_layer_6)
decoded_layer_2 = layers.Dense(30, activation='relu')(decoded_layer_1)
decoded_layer_3 = layers.Dense(40, activation='relu')(decoded_layer_2)
decoded_layer_4 = layers.Dense(50, activation='relu')(decoded_layer_3)
decoded_layer_5 = layers.Dense(60, activation='relu')(decoded_layer_4)
decoded_layer_6 = layers.Dense(64, activation='relu')(decoded_layer_5)
autoencoder = keras.Model(inputs=dim_input, outputs=decoded_layer_6)
autoencoder.compile(loss='categorical_crossentropy', metrics='categorical_accuracy')
encoder = keras.Model(inputs=dim_input, outputs=encoded_layer_6)
encoded_input = layers.Input(shape=(encoder_dim,))
encoded_train = pd.DataFrame(encoder.predict(X_train))
encoded_train = encoded_train.add_prefix('feature_')
encoded_test = pd.DataFrame(encoder.predict(X_test))
encoded_test = encoded_test.add_prefix('feature_')
code
128014397/cell_3
[ "text_plain_output_1.png" ]
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', None)
from pathlib import Path
from plotnine import *
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models, Sequential
from tensorflow.keras import backend as K
import tensorflow_ranking as tfr
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, QuantileTransformer, RobustScaler
from sklearn.metrics import log_loss
from colorama import Fore, Back, Style
from tqdm import tqdm
from umap import UMAP
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

def map3(y_true, y_pred, **kwargs):
    map3_metric = tfr.keras.metrics.MeanAveragePrecisionMetric(topn=3)
    return map3_metric(y_true, y_pred, **kwargs).numpy()

RANDOM_STATE = 42
INCLUDE_ORIGINAL = False
code
105186076/cell_13
[ "image_output_1.png" ]
import numpy as np

# Two manual gradient-descent updates of the intercept b, with the slope m
# held fixed at the OLS estimate (78.35).
X.ravel()[:5]
m = 78.35
b1 = 100
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)  # dL/db for squared-error loss
lr = 0.1
step_size = loss_slope * lr
b1 = b1 - step_size
b1
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
step_size = loss_slope * lr
b2 = b1 - step_size
b2
code
105186076/cell_9
[ "image_output_1.png" ]
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

X, y = make_regression(n_samples=4, n_features=1, n_informative=1, n_targets=1, noise=80, random_state=13)
reg = LinearRegression()
reg.fit(X, y)
(reg.coef_, reg.intercept_)
y_pred = (78.35 * X + 0).reshape(4)
plt.scatter(X, y)
plt.plot(X, reg.predict(X), color='red', label='OLS')
plt.plot(X, y_pred, color='#ffa500', label='b(intercept) = 0-initial_random')
plt.legend()
plt.show()
code
105186076/cell_4
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

reg = LinearRegression()
reg.fit(X, y)
code
105186076/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

X, y = make_regression(n_samples=4, n_features=1, n_informative=1, n_targets=1, noise=80, random_state=13)
reg = LinearRegression()
reg.fit(X, y)
(reg.coef_, reg.intercept_)
plt.scatter(X, y)
plt.plot(X, reg.predict(X), color='red')
plt.show()
code
105186076/cell_11
[ "text_plain_output_1.png" ]
import numpy as np

X.ravel()[:5]
m = 78.35
b1 = 100
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
lr = 0.1
step_size = loss_slope * lr
b1 = b1 - step_size
b1
code
105186076/cell_15
[ "text_plain_output_1.png" ]
import numpy as np

X.ravel()[:5]
m = 78.35
b1 = 100
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
lr = 0.1
step_size = loss_slope * lr
b1 = b1 - step_size
b1
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
step_size = loss_slope * lr
b2 = b1 - step_size
b2
loss_slope = -2 * np.sum(y - m * X.ravel() - b2)
step_size = loss_slope * lr
b3 = b2 - step_size
b3
code
105186076/cell_16
[ "text_plain_output_1.png" ]
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np

X, y = make_regression(n_samples=4, n_features=1, n_informative=1, n_targets=1, noise=80, random_state=13)
reg = LinearRegression()
reg.fit(X, y)
(reg.coef_, reg.intercept_)
y_pred = (78.35 * X + 0).reshape(4)
X.ravel()[:5]
m = 78.35
b1 = 100
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
lr = 0.1
step_size = loss_slope * lr
b1 = b1 - step_size
b1
y_pred1 = (78.35 * X + b1).reshape(4)
loss_slope = -2 * np.sum(y - m * X.ravel() - b1)
step_size = loss_slope * lr
b2 = b1 - step_size
b2
y_pred2 = (78.35 * X + b2).reshape(4)
loss_slope = -2 * np.sum(y - m * X.ravel() - b2)
step_size = loss_slope * lr
b3 = b2 - step_size
b3
y_pred3 = (78.35 * X + b3).reshape(4)
plt.figure(figsize=(18, 6))
plt.scatter(X, y)
plt.plot(X, reg.predict(X), color='red', label='OLS', linewidth=3)
plt.plot(X, y_pred3, 'b--', label='b3 = {}_updated@step3'.format(b3))
plt.plot(X, y_pred2, color='#ffb347', label='b2 = {}_updated@step2'.format(b2))
plt.plot(X, y_pred1, color='#f8b878', label='b1 = {}_updated@step1'.format(b1))
plt.plot(X, y_pred, color='#ffa500', label='b = 0_initial_random')
plt.legend()
plt.show()
code
105186076/cell_3
[ "image_output_1.png" ]
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt

X, y = make_regression(n_samples=4, n_features=1, n_informative=1, n_targets=1, noise=80, random_state=13)
plt.scatter(X, y)
plt.show()
code