Dataset schema (one row per Kaggle notebook cell):
- path: string, 13–17 characters (e.g. "122251329/cell_33")
- screenshot_names: sequence of strings, 1–873 items (file names of the cell's rendered outputs)
- code: string, 0–40.4k characters (the cell's source)
- cell_type: string, 1 class ("code")
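Each row pairs one notebook cell's source with the names of its rendered outputs. As a minimal orientation sketch (the identifier "user/kaggle-notebook-cells" below is a hypothetical placeholder, not the dataset's real name), a table with this schema could be loaded and inspected like so:

# Minimal sketch of inspecting a dataset with the schema above.
# NOTE: "user/kaggle-notebook-cells" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("user/kaggle-notebook-cells", split="train")
row = ds[0]
print(row["path"])              # e.g. "122251329/cell_33"
print(row["screenshot_names"])  # list of output-image file names
print(row["cell_type"])         # single class: "code"
print(row["code"][:200])        # first 200 characters of the cell source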
122251329/cell_33
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target

# stacked histograms of the numerical features, split by the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple="stack", palette='colorblind')  # kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel("count", fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()

# box plots of the numerical features against the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x="HeartDisease", data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout()

# heart-disease rate by chest-pain type and sex, faceted by ST slope,
# exercise-induced angina and resting ECG
grid = sns.FacetGrid(hrz, col='ST_Slope', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'RestingECG', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['Normal', 'LVH', 'ST'], palette='colorblind')
grid.add_legend()
code
122251329/cell_44
[ "image_output_1.png" ]
from scipy import stats
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, KFold
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target

# stacked histograms of the numerical features, split by the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple="stack", palette='colorblind')  # kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel("count", fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()

# box plots of the numerical features against the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x="HeartDisease", data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout()

grid = sns.FacetGrid(hrz, col='ST_Slope', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'RestingECG', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['Normal', 'LVH', 'ST'], palette='colorblind')
grid.add_legend()

# the cramers_v function is taken from
# https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9
def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2 = stats.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))

# calculate the correlation coefficients
hrz_ = hrz[cat_attribs + target]
rows = []
for x in hrz_:
    col = []
    for y in hrz_:
        col.append(cramers_v(hrz_[x], hrz_[y]))
    rows.append(col)
cramers_results = np.array(rows)
df = pd.DataFrame(cramers_results, columns=hrz_.columns, index=hrz_.columns)

# heatmap plot
mask = np.triu(np.ones_like(df, dtype=bool))
fig, ax = plt.subplots(figsize=(8, 6), facecolor=None)
sns.heatmap(df, cmap=sns.color_palette("husl", as_cmap=True), vmin=0, vmax=1.0, center=0,
            annot=True, fmt='.2f', square=True, linewidths=.01, cbar_kws={"shrink": 0.8})
ax.set_title("Association between categorical variables (Cramer's V)", fontsize=14)

def splitting(test_fraction, df, seed):
    split_instance = StratifiedShuffleSplit(n_splits=1, test_size=test_fraction, random_state=seed)
    for train_index, test_index in split_instance.split(df, df['HeartDisease']):
        strat_train_set = df.loc[train_index]
        strat_test_set = df.loc[test_index]
    return (strat_train_set, strat_test_set)

seed = 123
test_fraction = 0.15
strat_train_set, strat_test_set = splitting(test_fraction, hrz, seed)
print('Fractions of heart diseases in the original, train and test dataset: %.3f, %.3f, %.3f'
      % (hrz.loc[hrz['HeartDisease'] == 1].shape[0] / hrz.shape[0],
         strat_train_set.loc[strat_train_set['HeartDisease'] == 1].shape[0] / strat_train_set.shape[0],
         strat_test_set.loc[strat_test_set['HeartDisease'] == 1].shape[0] / strat_test_set.shape[0]))
code
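An aside on the splitting helper in the cell above: because it draws a single stratified split, scikit-learn's train_test_split with its stratify argument is an equivalent one-liner. A minimal sketch, assuming the same hrz frame, seed and test fraction:

from sklearn.model_selection import train_test_split

# Single stratified split: preserves the HeartDisease class balance in both
# partitions, matching StratifiedShuffleSplit with n_splits=1.
strat_train_set, strat_test_set = train_test_split(
    hrz, test_size=0.15, stratify=hrz['HeartDisease'], random_state=123)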
122251329/cell_29
[ "image_output_1.png" ]
from IPython.display import display
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs

for attr in cat_attribs:
    display(hrz[[attr, 'HeartDisease']].groupby(attr, as_index=False).mean()
            .sort_values(by='HeartDisease', ascending=False))
code
122251329/cell_39
[ "image_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target

# stacked histograms of the numerical features, split by the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple="stack", palette='colorblind')  # kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel("count", fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()

# box plots of the numerical features against the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x="HeartDisease", data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout()

grid = sns.FacetGrid(hrz, col='ST_Slope', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'RestingECG', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['Normal', 'LVH', 'ST'], palette='colorblind')
grid.add_legend()

# the cramers_v function is taken from
# https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9
def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2 = stats.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))

# calculate the correlation coefficients
hrz_ = hrz[cat_attribs + target]
rows = []
for x in hrz_:
    col = []
    for y in hrz_:
        col.append(cramers_v(hrz_[x], hrz_[y]))
    rows.append(col)
cramers_results = np.array(rows)
df = pd.DataFrame(cramers_results, columns=hrz_.columns, index=hrz_.columns)

# heatmap plot
mask = np.triu(np.ones_like(df, dtype=bool))
fig, ax = plt.subplots(figsize=(8, 6), facecolor=None)
sns.heatmap(df, cmap=sns.color_palette("husl", as_cmap=True), vmin=0, vmax=1.0, center=0,
            annot=True, fmt='.2f', square=True, linewidths=.01, cbar_kws={"shrink": 0.8})
ax.set_title("Association between categorical variables (Cramer's V)", fontsize=14)

# Pearson correlation among the numerical features and the target
corr_matrix = hrz[num_attribs + target].corr()
dataplot = sns.heatmap(corr_matrix, cmap='YlGnBu', annot=True, fmt='.2f')
plt.show()
code
122251329/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
hrz.info()
code
122251329/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
# count rows with (physiologically implausible) zero readings
print(hrz[hrz['Cholesterol'] == 0].shape[0])
print(hrz[hrz['RestingBP'] == 0].shape[0])
code
122251329/cell_32
[ "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_1.png", "text_html_output_3.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target

# stacked histograms of the numerical features, split by the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple="stack", palette='colorblind')  # kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel("count", fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()

# box plots of the numerical features against the target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x="HeartDisease", data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout()

grid = sns.FacetGrid(hrz, col='ST_Slope', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()
code
122251329/cell_15
[ "text_plain_output_1.png" ]
from IPython.display import display
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs

for attr in target + cat_attribs:
    display(hrz[attr].value_counts(normalize=True))
code
122251329/cell_17
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
hrz.describe()
code
122251329/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

print('Mean MaxHR values for F, M:')
print('HeartDisease=0',
      round(hrz.loc[(hrz['Sex'] == 'F') & (hrz['HeartDisease'] == 0)]['MaxHR'].mean(), 2),
      round(hrz.loc[(hrz['Sex'] == 'M') & (hrz['HeartDisease'] == 0)]['MaxHR'].mean(), 2))
print('HeartDisease=1',
      round(hrz.loc[(hrz['Sex'] == 'F') & (hrz['HeartDisease'] == 1)]['MaxHR'].mean(), 2),
      round(hrz.loc[(hrz['Sex'] == 'M') & (hrz['HeartDisease'] == 1)]['MaxHR'].mean(), 2))
print('Mean Oldpeak values for F, M:')
print('HeartDisease=0',
      round(hrz.loc[(hrz['Sex'] == 'F') & (hrz['HeartDisease'] == 0)]['Oldpeak'].mean(), 2),
      round(hrz.loc[(hrz['Sex'] == 'M') & (hrz['HeartDisease'] == 0)]['Oldpeak'].mean(), 2))
print('HeartDisease=1',
      round(hrz.loc[(hrz['Sex'] == 'F') & (hrz['HeartDisease'] == 1)]['Oldpeak'].mean(), 2),
      round(hrz.loc[(hrz['Sex'] == 'M') & (hrz['HeartDisease'] == 1)]['Oldpeak'].mean(), 2))
code
122251329/cell_37
[ "text_plain_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)

target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target

ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple="stack", palette='colorblind')  # kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel("count", fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()

ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x="HeartDisease", data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout()

grid = sns.FacetGrid(hrz, col='ST_Slope', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'ChestPainType', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['ASY', 'ATA', 'NAP', 'TA'], palette='colorblind')
grid.add_legend()

grid = sns.FacetGrid(hrz, col='ExerciseAngina', height=3.0, aspect=1.2)
grid.map(sns.pointplot, 'RestingECG', 'HeartDisease', 'Sex', hue_order=['M', 'F'],
         order=['Normal', 'LVH', 'ST'], palette='colorblind')
grid.add_legend()

def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2 = stats.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
    rcorr = r - (r - 1) ** 2 / (n - 1)
    kcorr = k - (k - 1) ** 2 / (n - 1)
    return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))

hrz_ = hrz[cat_attribs + target]
rows = []
for x in hrz_:
    col = []
    for y in hrz_:
        col.append(cramers_v(hrz_[x], hrz_[y]))
    rows.append(col)
cramers_results = np.array(rows)
df = pd.DataFrame(cramers_results, columns=hrz_.columns, index=hrz_.columns)

mask = np.triu(np.ones_like(df, dtype=bool))
fig, ax = plt.subplots(figsize=(8, 6), facecolor=None)
sns.heatmap(df, cmap=sns.color_palette('husl', as_cmap=True), vmin=0, vmax=1.0, center=0,
            annot=True, fmt='.2f', square=True, linewidths=0.01, cbar_kws={'shrink': 0.8})
ax.set_title("Association between categorical variables (Cramer's V)", fontsize=14)
code
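For reference, the cramers_v helper in the cell above computes the bias-corrected Cramér's V. Restated from the code as formulas, with $\chi^2$ taken from the $r \times k$ contingency table of $n$ observations:

$$
\tilde{\varphi}^2 = \max\!\left(0,\ \frac{\chi^2}{n} - \frac{(k-1)(r-1)}{n-1}\right),
\qquad
\tilde{r} = r - \frac{(r-1)^2}{n-1},
\qquad
\tilde{k} = k - \frac{(k-1)^2}{n-1},
\qquad
V = \sqrt{\frac{\tilde{\varphi}^2}{\min(\tilde{k}-1,\ \tilde{r}-1)}}
$$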
1009964/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df.head(10)
code
1009964/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

# columns with missing values in train and/or test
NAs = pd.concat([train_df.isnull().sum(), test_df.isnull().sum()], axis=1, keys=['Train', 'Test'])
NAs[NAs.sum(axis=1) > 1]
code
1009964/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import random as rnd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

print(check_output(['ls', '../input']).decode('utf8'))
code
1009964/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

NAs = pd.concat([train_df.isnull().sum(), test_df.isnull().sum()], axis=1, keys=['Train', 'Test'])
NAs[NAs.sum(axis=1) > 1]
train_df['FireplaceQu']
code
1009964/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df.info()
code
1010160/cell_4
[ "text_plain_output_1.png" ]
from scipy.misc import imread  # removed in modern SciPy; imageio.imread is the usual replacement
import cv2 as cv
import glob
import numpy as np
import os
import random

species = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
select = 1000
ROWS = 90
COLS = 160
CHANNELS = 3
PATH = './input/'

def get_image(file):
    pos1 = file.rfind('/img_')
    return file[pos1 + 1:]

def get_id(file):
    pos1 = file.rfind('_')
    pos2 = file.rfind('.')
    return file[pos1 + 1:pos2]

def load_train_data(select):
    # shuffle the file list and keep `select` images
    train_files = sorted(glob.glob(PATH + '/train/*/*.jpg'), key=lambda x: random.random())[:select]
    train = np.array([imread(img) for img in train_files])
    # note: cv.resize expects (width, height)
    X_train = np.array([cv.resize(img, (ROWS, COLS)) for img in train])
    y = np.array([species.index(os.path.dirname(img).replace(PATH + '/train/', ''))
                  for img in train_files])
    ids = np.array([get_id(img) for img in train_files])
    X_train = np.array(X_train, dtype=np.float32) / 255
    return (X_train, y, ids)

def load_test_data():
    test_files = sorted(glob.glob(PATH + '/test_stg1/*.jpg'))
    test = np.array([imread(img) for img in test_files])
    X_test = np.array([cv.resize(img, (ROWS, COLS)) for img in test])
    X_test = np.array(X_test, dtype=np.float32) / 255
    ids = np.array([get_image(img) for img in test_files])
    return (X_test, ids)

X, y, ids = load_train_data(select)
print(X.shape)
code
1010160/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Convolution2D, MaxPooling2D, Flatten, Activation
from keras.layers import Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from scipy.misc import imread
import cv2 as cv
import glob
import numpy as np
import os
import random

species = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
select = 1000
ROWS = 90
COLS = 160
CHANNELS = 3
PATH = './input/'

def get_image(file):
    pos1 = file.rfind('/img_')
    return file[pos1 + 1:]

def get_id(file):
    pos1 = file.rfind('_')
    pos2 = file.rfind('.')
    return file[pos1 + 1:pos2]

def load_train_data(select):
    train_files = sorted(glob.glob(PATH + '/train/*/*.jpg'), key=lambda x: random.random())[:select]
    train = np.array([imread(img) for img in train_files])
    X_train = np.array([cv.resize(img, (ROWS, COLS)) for img in train])
    y = np.array([species.index(os.path.dirname(img).replace(PATH + '/train/', ''))
                  for img in train_files])
    ids = np.array([get_id(img) for img in train_files])
    X_train = np.array(X_train, dtype=np.float32) / 255
    return (X_train, y, ids)

def load_test_data():
    test_files = sorted(glob.glob(PATH + '/test_stg1/*.jpg'))
    test = np.array([imread(img) for img in test_files])
    X_test = np.array([cv.resize(img, (ROWS, COLS)) for img in test])
    X_test = np.array(X_test, dtype=np.float32) / 255
    ids = np.array([get_image(img) for img in test_files])
    return (X_test, ids)

from keras.models import load_model
from keras.layers import Dropout

# Keras 1-style CNN (nb_filter / border_mode / nb_epoch are the old spellings
# of filters / padding / epochs in Keras 2)
model = Sequential()
model.add(Convolution2D(nb_filter=32, nb_row=5, nb_col=5, border_mode='same', input_shape=(3, ROWS, COLS)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(128, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Flatten())
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(8))
model.add(Dropout(0.5))
model.add(Activation('softmax'))

adam = Adam()
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# X_train/y_train/X_test/y_test come from a train/test split in an earlier cell (not shown here)
model.fit(X_train, y_train, nb_epoch=50, batch_size=32)
loss, accuracy = model.evaluate(X_test, y_test)
print('\n test loss:', loss)
print('\n test accuracy', accuracy)
model.save('my_mode.h5')
code
1010160/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2 as cv
import glob
import random
import numpy as np
from scipy.misc import imread
import os
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Convolution2D, MaxPooling2D, Flatten, Activation
from keras.optimizers import Adam
from sklearn.cross_validation import train_test_split  # renamed sklearn.model_selection in newer scikit-learn
code
1010160/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Convolution2D, MaxPooling2D, Flatten, Activation
from keras.layers import Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from scipy.misc import imread
import cv2 as cv
import glob
import numpy as np
import os
import random

species = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
select = 1000
ROWS = 90
COLS = 160
CHANNELS = 3
PATH = './input/'

def get_image(file):
    pos1 = file.rfind('/img_')
    return file[pos1 + 1:]

def get_id(file):
    pos1 = file.rfind('_')
    pos2 = file.rfind('.')
    return file[pos1 + 1:pos2]

def load_train_data(select):
    train_files = sorted(glob.glob(PATH + '/train/*/*.jpg'), key=lambda x: random.random())[:select]
    train = np.array([imread(img) for img in train_files])
    X_train = np.array([cv.resize(img, (ROWS, COLS)) for img in train])
    y = np.array([species.index(os.path.dirname(img).replace(PATH + '/train/', ''))
                  for img in train_files])
    ids = np.array([get_id(img) for img in train_files])
    X_train = np.array(X_train, dtype=np.float32) / 255
    return (X_train, y, ids)

def load_test_data():
    test_files = sorted(glob.glob(PATH + '/test_stg1/*.jpg'))
    test = np.array([imread(img) for img in test_files])
    X_test = np.array([cv.resize(img, (ROWS, COLS)) for img in test])
    X_test = np.array(X_test, dtype=np.float32) / 255
    ids = np.array([get_image(img) for img in test_files])
    return (X_test, ids)

from keras.models import load_model
from keras.layers import Dropout

model = Sequential()
model.add(Convolution2D(nb_filter=32, nb_row=5, nb_col=5, border_mode='same', input_shape=(3, ROWS, COLS)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(128, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Flatten())
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(8))
model.add(Dropout(0.5))
model.add(Activation('softmax'))

adam = Adam()
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# X_train/y_train/X_test/y_test come from a train/test split in an earlier cell (not shown here)
model.fit(X_train, y_train, nb_epoch=50, batch_size=32)
loss, accuracy = model.evaluate(X_test, y_test)
model.save('my_mode.h5')

# predict on the stage-1 test set; reorder axes to channels-first
test, ids = load_test_data()
data = test.transpose((0, 3, 2, 1))
predictions = model.predict(data, verbose=1)
code
1010160/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Convolution2D, MaxPooling2D, Flatten, Activation
from keras.layers import Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from scipy.misc import imread
import cv2 as cv
import datetime
import glob
import numpy as np
import os
import pandas as pd
import random

species = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
select = 1000
ROWS = 90
COLS = 160
CHANNELS = 3
PATH = './input/'

def get_image(file):
    pos1 = file.rfind('/img_')
    return file[pos1 + 1:]

def get_id(file):
    pos1 = file.rfind('_')
    pos2 = file.rfind('.')
    return file[pos1 + 1:pos2]

def load_train_data(select):
    train_files = sorted(glob.glob(PATH + '/train/*/*.jpg'), key=lambda x: random.random())[:select]
    train = np.array([imread(img) for img in train_files])
    X_train = np.array([cv.resize(img, (ROWS, COLS)) for img in train])
    y = np.array([species.index(os.path.dirname(img).replace(PATH + '/train/', ''))
                  for img in train_files])
    ids = np.array([get_id(img) for img in train_files])
    X_train = np.array(X_train, dtype=np.float32) / 255
    return (X_train, y, ids)

def load_test_data():
    test_files = sorted(glob.glob(PATH + '/test_stg1/*.jpg'))
    test = np.array([imread(img) for img in test_files])
    X_test = np.array([cv.resize(img, (ROWS, COLS)) for img in test])
    X_test = np.array(X_test, dtype=np.float32) / 255
    ids = np.array([get_image(img) for img in test_files])
    return (X_test, ids)

from keras.models import load_model
from keras.layers import Dropout

model = Sequential()
model.add(Convolution2D(nb_filter=32, nb_row=5, nb_col=5, border_mode='same', input_shape=(3, ROWS, COLS)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Convolution2D(128, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))
model.add(Flatten())
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(8))
model.add(Dropout(0.5))
model.add(Activation('softmax'))

adam = Adam()
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# X_train/y_train/X_test/y_test come from a train/test split in an earlier cell (not shown here)
model.fit(X_train, y_train, nb_epoch=50, batch_size=32)
loss, accuracy = model.evaluate(X_test, y_test)
model.save('my_mode.h5')

test, ids = load_test_data()
data = test.transpose((0, 3, 2, 1))
predictions = model.predict(data, verbose=1)

# write a timestamped submission file with one probability column per species
import pandas as pd
import datetime
result1 = pd.DataFrame(predictions, columns=['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'])
result1.loc[:, 'image'] = pd.Series(ids, index=result1.index)
now = datetime.datetime.now()
sub_file = 'submission_' + str(now.strftime('%Y-%m-%d-%H-%M')) + '.csv'
result1.to_csv(sub_file, index=False)
code
121150047/cell_13
[ "text_plain_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor
from colorama import Style, Fore

rc = {'axes.facecolor': '#FFF9ED', 'figure.facecolor': '#FFF9ED', 'axes.edgecolor': '#000000',
      'grid.color': '#EBEBE7', 'font.family': 'serif', 'axes.labelcolor': '#000000',
      'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)

red = Style.BRIGHT + Fore.RED
blue = Style.BRIGHT + Fore.BLUE
magenta = Style.BRIGHT + Fore.MAGENTA
gold = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL

warnings.filterwarnings('ignore')

old_df = pd.read_csv('/kaggle/input/regression-with-neural-networking/concrete_data.csv')
original_df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv').drop(columns=['id'])
test_df = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv').drop(columns=['id'])
sample = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')

# column lists by dtype
original_num_cols_int = [col for col in original_df.columns if original_df[col].dtype == 'int']
original_num_cols_float = [col for col in original_df.columns if original_df[col].dtype == 'float']
original_num_cols_obj = [col for col in original_df.columns if original_df[col].dtype == 'object']
old_num_cols_int = [col for col in old_df.columns if old_df[col].dtype == 'int']
old_num_cols_float = [col for col in old_df.columns if old_df[col].dtype == 'float']
old_num_cols_obj = [col for col in old_df.columns if old_df[col].dtype == 'object']
test_num_cols_int = [col for col in test_df.columns if test_df[col].dtype == 'int']
test_num_cols_float = [col for col in test_df.columns if test_df[col].dtype == 'float']
test_num_cols_obj = [col for col in test_df.columns if test_df[col].dtype == 'object']

# align the old dataset's column names with the competition data
original_df_column = [col for col in original_df.columns]
old_df_columns = [col for col in old_df.columns]
name_cng_dict = {key: value for key, value in zip(old_df_columns, original_df_column)}
old_df.rename(columns=name_cng_dict, inplace=True)

fig, ax = plt.subplots(3, 3, figsize=(15, 10), dpi=100)
ax = ax.flatten()
for i, column in enumerate(original_df.columns[:-1]):
    plot_axes = [ax[i]]
    sns.kdeplot(original_df[column], label='Original_df', ax=ax[i], color='#9E3F00')
    sns.kdeplot(test_df[column], label='Test_df', ax=ax[i], color='yellow')
    sns.kdeplot(old_df[column], label='Old_df', ax=ax[i], color='#20BEFF')
    # titles
    ax[i].set_title(f'{column} Distribution')
    ax[i].set_xlabel(None)
    # remove axes to show only one at the end
    plot_axes = [ax[i]]
    handles = []
    labels = []
    for plot_ax in plot_axes:
        handles += plot_ax.get_legend_handles_labels()[0]
        labels += plot_ax.get_legend_handles_labels()[1]
        plot_ax.legend().remove()
for i in range(i + 1, len(ax)):
    ax[i].axis('off')
fig.suptitle('Dataset Feature Distributions\n\n\n', ha='center', fontweight='bold', fontsize=25)
fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 0.96), fontsize=25, ncol=3)
plt.tight_layout()

# correlation
corr = original_df.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
f.suptitle('Heatmap for Original DF\n\n\n', ha='center', fontweight='bold', fontsize=25)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5,
            cbar_kws={"shrink": .5})

corr = old_df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
f.suptitle('Heatmap for Old DF\n\n\n', ha='center', fontweight='bold', fontsize=25)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5,
            cbar_kws={'shrink': 0.5})
code
121150047/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor
from colorama import Style, Fore

rc = {'axes.facecolor': '#FFF9ED', 'figure.facecolor': '#FFF9ED', 'axes.edgecolor': '#000000',
      'grid.color': '#EBEBE7', 'font.family': 'serif', 'axes.labelcolor': '#000000',
      'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)

red = Style.BRIGHT + Fore.RED
blue = Style.BRIGHT + Fore.BLUE
magenta = Style.BRIGHT + Fore.MAGENTA
gold = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL

warnings.filterwarnings('ignore')

old_df = pd.read_csv('/kaggle/input/regression-with-neural-networking/concrete_data.csv')
original_df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv').drop(columns=['id'])
test_df = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv').drop(columns=['id'])
sample = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')

original_num_cols_int = [col for col in original_df.columns if original_df[col].dtype == 'int']
original_num_cols_float = [col for col in original_df.columns if original_df[col].dtype == 'float']
original_num_cols_obj = [col for col in original_df.columns if original_df[col].dtype == 'object']
old_num_cols_int = [col for col in old_df.columns if old_df[col].dtype == 'int']
old_num_cols_float = [col for col in old_df.columns if old_df[col].dtype == 'float']
old_num_cols_obj = [col for col in old_df.columns if old_df[col].dtype == 'object']
test_num_cols_int = [col for col in test_df.columns if test_df[col].dtype == 'int']
test_num_cols_float = [col for col in test_df.columns if test_df[col].dtype == 'float']
test_num_cols_obj = [col for col in test_df.columns if test_df[col].dtype == 'object']

original_df_column = [col for col in original_df.columns]
old_df_columns = [col for col in old_df.columns]
name_cng_dict = {key: value for key, value in zip(old_df_columns, original_df_column)}
old_df.rename(columns=name_cng_dict, inplace=True)

fig, ax = plt.subplots(3, 3, figsize=(15, 10), dpi=100)
ax = ax.flatten()
for i, column in enumerate(original_df.columns[:-1]):
    plot_axes = [ax[i]]
    sns.kdeplot(original_df[column], label='Original_df', ax=ax[i], color='#9E3F00')
    sns.kdeplot(test_df[column], label='Test_df', ax=ax[i], color='yellow')
    sns.kdeplot(old_df[column], label='Old_df', ax=ax[i], color='#20BEFF')
    ax[i].set_title(f'{column} Distribution')
    ax[i].set_xlabel(None)
    plot_axes = [ax[i]]
    handles = []
    labels = []
    for plot_ax in plot_axes:
        handles += plot_ax.get_legend_handles_labels()[0]
        labels += plot_ax.get_legend_handles_labels()[1]
        plot_ax.legend().remove()
for i in range(i + 1, len(ax)):
    ax[i].axis('off')
fig.suptitle('Dataset Feature Distributions\n\n\n', ha='center', fontweight='bold', fontsize=25)
fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 0.96), fontsize=25, ncol=3)
plt.tight_layout()

# correlation heatmaps (upper triangle masked)
corr = original_df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
f.suptitle('Heatmap for Original DF\n\n\n', ha='center', fontweight='bold', fontsize=25)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5,
            cbar_kws={"shrink": .5})

corr = old_df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
f.suptitle('Heatmap for Old DF\n\n\n', ha='center', fontweight='bold', fontsize=25)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5,
            cbar_kws={"shrink": .5})

# pool the competition data with the original concrete dataset
final_df = pd.concat([original_df, old_df], ignore_index=True)

def model_training(train, test, model):
    kf = KFold(n_splits=5)
    X = train.copy()
    test_ = test.copy()
    preprocess_pipeline = preprocess()  # preprocess() is defined in an earlier notebook cell (not shown here)
    predictions = []
    scores = []
    for i, (train, val) in enumerate(kf.split(X)):
        X_train = X.iloc[train].drop(columns=['Strength'])
        y_train = X['Strength'].iloc[train]
        y_train = y_train.to_numpy()
        X_val = X.iloc[val].drop(columns=['Strength'])
        y_val = X['Strength'].iloc[val]
        y_val = y_val.to_numpy()
        pipeline = Pipeline([('preprocess', preprocess_pipeline), ('training', model)])
        pipeline.fit(X_train, y_train)
        test_pred = pipeline.predict(test_)
        predictions.append(test_pred)
        y_pred = pipeline.predict(X_val)
        mse = mean_squared_error(y_val, y_pred)
        rmse = np.sqrt(mse)
        scores.append(rmse)
    return (predictions, model)

learning_rate = 0.2
n_estimators = 250
n_jobs = -1
max_depth = 5
min_child_weight = 2
gamma = 0.01
reg_alpha = 0.01
reg_lambda = 0.01
subsample = 0.8
colsample_bytree = 0.67
seed = 42

xgbr = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth,
                    min_child_weight=min_child_weight, gamma=gamma, reg_alpha=reg_alpha,
                    reg_lambda=reg_lambda, subsample=subsample, colsample_bytree=colsample_bytree,
                    seed=seed, n_jobs=n_jobs)
xgbr, model_xgb = model_training(final_df, test_df, xgbr)
code
121150047/cell_6
[ "text_plain_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor
from colorama import Style, Fore

rc = {'axes.facecolor': '#FFF9ED', 'figure.facecolor': '#FFF9ED', 'axes.edgecolor': '#000000',
      'grid.color': '#EBEBE7', 'font.family': 'serif', 'axes.labelcolor': '#000000',
      'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)

red = Style.BRIGHT + Fore.RED
blue = Style.BRIGHT + Fore.BLUE
magenta = Style.BRIGHT + Fore.MAGENTA
gold = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL

warnings.filterwarnings('ignore')

old_df = pd.read_csv('/kaggle/input/regression-with-neural-networking/concrete_data.csv')
original_df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv').drop(columns=['id'])
test_df = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv').drop(columns=['id'])
sample = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')

print(f'{red} [Name] ->{blue} Shape\n')
print(f'{gold}[+] {red} [original_df] -> {blue}{original_df.shape}\n')
print(f'{gold}[+] {red} [old_df] -> {blue}{old_df.shape}\n')
print(f'{gold}[+] {red} [test_df] -> {blue}{test_df.shape}\n')
print('\n')
print(f'{red} [Name] ->{blue} Missing Values\n')
print(f'{gold}[+] {red} [original_df] -> {blue}{original_df.isna().any().any()}\n')
print(f'{gold}[+] {red} [old_df] -> {blue}{old_df.isna().any().any()}\n')
print(f'{gold}[+] {red} [test_df] -> {blue}{test_df.isna().any().any()}\n')
print('\n')

original_num_cols_int = [col for col in original_df.columns if original_df[col].dtype == 'int']
original_num_cols_float = [col for col in original_df.columns if original_df[col].dtype == 'float']
original_num_cols_obj = [col for col in original_df.columns if original_df[col].dtype == 'object']
old_num_cols_int = [col for col in old_df.columns if old_df[col].dtype == 'int']
old_num_cols_float = [col for col in old_df.columns if old_df[col].dtype == 'float']
old_num_cols_obj = [col for col in old_df.columns if old_df[col].dtype == 'object']
test_num_cols_int = [col for col in test_df.columns if test_df[col].dtype == 'int']
test_num_cols_float = [col for col in test_df.columns if test_df[col].dtype == 'float']
test_num_cols_obj = [col for col in test_df.columns if test_df[col].dtype == 'object']

print(f'{red} [Name] ->{blue} Dtype -> {magenta}Total Col.\n')
print('\n')
print(f'{gold}[+] {red} [original_df] -> {blue} int32 ->{magenta} {len(original_num_cols_int)} \n')
print(f'{gold}[+] {red} [original_df] -> {blue} float32 ->{magenta} {len(original_num_cols_float)}\n')
print(f'{gold}[+] {red} [original_df] -> {blue} obj ->{magenta} {len(original_num_cols_obj)}\n')
print('\n')
print(f'{gold}[+] {red} [old_df] -> {blue} int32 ->{magenta} {len(old_num_cols_int)} \n')
print(f'{gold}[+] {red} [old_df] -> {blue} float32 ->{magenta} {len(old_num_cols_float)}\n')
print(f'{gold}[+] {red} [old_df] -> {blue} obj ->{magenta} {len(old_num_cols_obj)}\n')
print('\n')
print(f'{gold}[+] {red} [test_df] -> {blue} int32 ->{magenta} {len(test_num_cols_int)} \n')
print(f'{gold}[+] {red} [test_df] -> {blue} float32 ->{magenta} {len(test_num_cols_float)}\n')
print(f'{gold}[+] {red} [test_df] -> {blue} obj ->{magenta} {len(test_num_cols_obj)}\n')
print('\n')
code
121150047/cell_2
[ "image_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor
from colorama import Style, Fore

# custom plot style
rc = {'axes.facecolor': '#FFF9ED', 'figure.facecolor': '#FFF9ED', 'axes.edgecolor': '#000000',
      'grid.color': '#EBEBE7', 'font.family': 'serif', 'axes.labelcolor': '#000000',
      'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)

# colorama shortcuts for colored console output
red = Style.BRIGHT + Fore.RED
blue = Style.BRIGHT + Fore.BLUE
magenta = Style.BRIGHT + Fore.MAGENTA
gold = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL

warnings.filterwarnings('ignore')
code
121150047/cell_12
[ "text_html_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor
from colorama import Style, Fore

rc = {'axes.facecolor': '#FFF9ED', 'figure.facecolor': '#FFF9ED', 'axes.edgecolor': '#000000',
      'grid.color': '#EBEBE7', 'font.family': 'serif', 'axes.labelcolor': '#000000',
      'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)

red = Style.BRIGHT + Fore.RED
blue = Style.BRIGHT + Fore.BLUE
magenta = Style.BRIGHT + Fore.MAGENTA
gold = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL

warnings.filterwarnings('ignore')

old_df = pd.read_csv('/kaggle/input/regression-with-neural-networking/concrete_data.csv')
original_df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv').drop(columns=['id'])
test_df = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv').drop(columns=['id'])
sample = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')

original_num_cols_int = [col for col in original_df.columns if original_df[col].dtype == 'int']
original_num_cols_float = [col for col in original_df.columns if original_df[col].dtype == 'float']
original_num_cols_obj = [col for col in original_df.columns if original_df[col].dtype == 'object']
old_num_cols_int = [col for col in old_df.columns if old_df[col].dtype == 'int']
old_num_cols_float = [col for col in old_df.columns if old_df[col].dtype == 'float']
old_num_cols_obj = [col for col in old_df.columns if old_df[col].dtype == 'object']
test_num_cols_int = [col for col in test_df.columns if test_df[col].dtype == 'int']
test_num_cols_float = [col for col in test_df.columns if test_df[col].dtype == 'float']
test_num_cols_obj = [col for col in test_df.columns if test_df[col].dtype == 'object']

original_df_column = [col for col in original_df.columns]
old_df_columns = [col for col in old_df.columns]
name_cng_dict = {key: value for key, value in zip(old_df_columns, original_df_column)}
old_df.rename(columns=name_cng_dict, inplace=True)

fig, ax = plt.subplots(3, 3, figsize=(15, 10), dpi=100)
ax = ax.flatten()
for i, column in enumerate(original_df.columns[:-1]):
    plot_axes = [ax[i]]
    sns.kdeplot(original_df[column], label='Original_df', ax=ax[i], color='#9E3F00')
    sns.kdeplot(test_df[column], label='Test_df', ax=ax[i], color='yellow')
    sns.kdeplot(old_df[column], label='Old_df', ax=ax[i], color='#20BEFF')
    ax[i].set_title(f'{column} Distribution')
    ax[i].set_xlabel(None)
    plot_axes = [ax[i]]
    handles = []
    labels = []
    for plot_ax in plot_axes:
        handles += plot_ax.get_legend_handles_labels()[0]
        labels += plot_ax.get_legend_handles_labels()[1]
        plot_ax.legend().remove()
for i in range(i + 1, len(ax)):
    ax[i].axis('off')
fig.suptitle('Dataset Feature Distributions\n\n\n', ha='center', fontweight='bold', fontsize=25)
fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 0.96), fontsize=25, ncol=3)
plt.tight_layout()

corr = original_df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
f.suptitle('Heatmap for Original DF\n\n\n', ha='center', fontweight='bold', fontsize=25)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5,
            cbar_kws={'shrink': 0.5})
code
121150047/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

old_df = pd.read_csv('/kaggle/input/regression-with-neural-networking/concrete_data.csv')
original_df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv').drop(columns=['id'])
test_df = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv').drop(columns=['id'])
sample = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')
original_df.head(3)
code
90108212/cell_13
[ "text_html_output_1.png" ]
data
code
90108212/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')

women = train_data.loc[train_data.Sex == 'female']['Survived']
rate_women = sum(women) / len(women)

men = train_data.loc[train_data.Sex == 'male']['Survived']
rate_men = sum(men) / len(men)

train_data
code
90108212/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_data.tail()
code
90108212/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')

women = train_data.loc[train_data.Sex == 'female']['Survived']
rate_women = sum(women) / len(women)
print('% of women who survived:', rate_women)
code
90108212/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')

women = train_data.loc[train_data.Sex == 'female']['Survived']
rate_women = sum(women) / len(women)

men = train_data.loc[train_data.Sex == 'male']['Survived']
rate_men = sum(men) / len(men)
print('% of men who survived:', rate_men)
code
90108212/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_data.head()
code
90108212/cell_12
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')

women = train_data.loc[train_data.Sex == 'female']['Survived']
rate_women = sum(women) / len(women)
men = train_data.loc[train_data.Sex == 'male']['Survived']
rate_men = sum(men) / len(men)

y = train_data['Survived']
features = ['Pclass', 'Sex', 'SibSp', 'Parch']
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])

model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)

output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print('Your submission was successfully saved!')
code
90108212/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
gender.head()
code
90148441/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')

data_new = [data_users['Age'], data_movies['Genres']]
headers_new = ['Age1', 'Genres1']
df4 = pd.concat(data_new, axis=1, keys=headers_new)
# Genres1 holds strings such as "Comedy|Romance"; regplot fits a linear
# regression and therefore needs a numeric y variable
sns.regplot(x=df4['Age1'], y=df4['Genres1'])
code
90148441/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')

sns.kdeplot(data=data_users['Age'], shade=True)
code
90148441/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_movies.head()
code
90148441/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.head()
code
90148441/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')

data_new = [data_users['Age'], data_movies['Genres']]
headers_new = ['Age1', 'Genres1']
df4 = pd.concat(data_new, axis=1, keys=headers_new)
df4.head()
code
90148441/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90148441/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
code
90148441/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None,
                          names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None,
                           names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None,
                         names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_users.head()
code
90148441/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
code
322568/cell_13
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
# recode the embarkation port; the 'Embarked' column is selected explicitly so
# that only this column is changed, not every column of the matching rows
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
print(titanic['Embarked'])
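# Equivalent vectorized recoding (added sketch on a throwaway Series, since the
# real column is already recoded above):
_demo = pd.Series(['S', 'C', 'Q']).map({'S': 0, 'C': 1, 'Q': 2})
print(_demo.tolist())  # [0, 1, 2]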
code
322568/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Sex'])
code
322568/cell_4
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic.head())
code
322568/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
titanic = pd.read_csv('../input/train.csv')
print(titanic.describe())
code
322568/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
print(titanic['Sex'])
code
322568/cell_15
[ "text_plain_output_1.png" ]
from sklearn.model_selection import cross_val_score, KFold
from sklearn.linear_model import LogisticRegression
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
# fill the training-set gaps the same way the test set is handled below;
# LogisticRegression cannot fit on NaN values
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
# select the 'Embarked' column explicitly so only that column is recoded
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
from sklearn.linear_model import LinearRegression
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Embarked', 'Fare']
alg = LogisticRegression(random_state=1)
accuracy = cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=3)
mean = np.mean(accuracy)
titanic_test = pd.read_csv('../input/test.csv')
titanic_test['Age'] = titanic_test['Age'].fillna(titanic['Age'].median())
titanic_test['Fare'] = titanic_test['Fare'].fillna(titanic_test['Fare'].median())
titanic_test.loc[titanic_test['Sex'] == 'male', 'Sex'] = 0
titanic_test.loc[titanic_test['Sex'] == 'female', 'Sex'] = 1
titanic_test['Embarked'] = titanic_test['Embarked'].fillna('S')
titanic_test.loc[titanic_test['Embarked'] == 'S', 'Embarked'] = 0
titanic_test.loc[titanic_test['Embarked'] == 'C', 'Embarked'] = 1
titanic_test.loc[titanic_test['Embarked'] == 'Q', 'Embarked'] = 2
alg = LogisticRegression(random_state=1)
alg.fit(titanic[predictors], titanic['Survived'])
predictions = alg.predict(titanic_test[predictors])
print(len(predictions))
code
322568/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic.describe())
code
322568/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Embarked', 'Fare']
code
322568/cell_10
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Embarked'].count())
print(titanic['Embarked'].unique())
code
322568/cell_12
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Embarked'].unique())
code
322568/cell_5
[ "text_plain_output_1.png" ]
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic['Cabin'].count())
code
34147377/cell_63
[ "text_plain_output_1.png" ]
from scipy.special import boxcox1p
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
model_data.shape
numeric_feats = model_data.dtypes[model_data.dtypes != 'object'].index
numeric_feats = [elem for elem in numeric_feats if elem not in ('Id', 'SalePrice')]
from scipy.special import boxcox1p
lam = 0.15
for feat in numeric_feats:
    model_data[feat] = boxcox1p(model_data[feat], lam)
model_data_new = model_data.copy()
model_data_new = model_data_new.drop(['Id'], axis=1)
model_data_new = pd.get_dummies(model_data_new)
model_data_updated = pd.concat([model_data['Id'], model_data_new], axis=1)
model_train_data = model_data_updated[model_data_updated.Id < 1461]
model_test_data = model_data_updated[model_data_updated.Id > 1460]
print('Train Data Shape: ', model_train_data.shape)
print('Test Data Shape : ', model_test_data.shape)
code
34147377/cell_21
[ "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
print('There are %d Num , %d Cat, %d Num-Cat columns.' % (len(num_cols), len(cat_cols), len(num_to_cat_cols)))
code
34147377/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test_data.shape
code
34147377/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:, i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
code
34147377/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:,i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:,i].dropna(), rug=False, hist=False, kde_kws={'bw':0.1})
    plt.xlabel(num_data.columns[i])
fig = plt.figure(figsize=(12, 18))
for i in range(len(num_data.columns)):
    fig.add_subplot(9, 4, i + 1)
    sns.scatterplot(num_data.iloc[:, i], num_data['SalePrice'])
plt.tight_layout()
plt.show()
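# Hedged note (assumes a newer seaborn, >= 0.11, is available): distplot and
# its 'bw' keyword are deprecated there; the density-only call above maps to
# kdeplot with an explicit bandwidth, e.g.
# sns.kdeplot(num_data.iloc[:, 0].dropna(), bw_method=0.1)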
code
34147377/cell_33
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_28.png", "image_output_23.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_27.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "image_output_3.png", "image_output_29.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_26.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:,i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:,i].dropna(), rug=False, hist=False, kde_kws={'bw':0.1})
    plt.xlabel(num_data.columns[i])
fig = plt.figure(figsize=(12,18))
for i in range(len(num_data.columns)):
    fig.add_subplot(9, 4, i+1)
    sns.scatterplot(num_data.iloc[:, i], num_data['SalePrice'])
plt.tight_layout()
plt.show()
corr_matrix = train_data[num_cols].corr()
corr_matrix['SalePrice'].sort_values(ascending=False)
sns.heatmap(corr_matrix)
code
34147377/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:,i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:, i].dropna(), rug=False, hist=False, kde_kws={'bw': 0.1})
    plt.xlabel(num_data.columns[i])
code
34147377/cell_48
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
model_data.tail()
code
34147377/cell_61
[ "image_output_1.png" ]
from scipy.special import boxcox1p
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
model_data.shape
numeric_feats = model_data.dtypes[model_data.dtypes != 'object'].index
numeric_feats = [elem for elem in numeric_feats if elem not in ('Id', 'SalePrice')]
from scipy.special import boxcox1p
lam = 0.15
for feat in numeric_feats:
    model_data[feat] = boxcox1p(model_data[feat], lam)
model_data_new = model_data.copy()
model_data_new = model_data_new.drop(['Id'], axis=1)
model_data_new = pd.get_dummies(model_data_new)
model_data_updated = pd.concat([model_data['Id'], model_data_new], axis=1)
model_data_updated.head()
code
34147377/cell_54
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
model_data.shape
code
34147377/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
code
34147377/cell_18
[ "text_plain_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
from scipy import stats
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
stats.probplot(np.log1p(train_data['SalePrice']), plot=plt)
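# Round-trip check (added illustration, not part of the original cell):
# np.expm1 inverts np.log1p exactly, so predictions made on the transformed
# target can be mapped back to the original price scale.
sale = np.array([125000.0, 250000.0])
assert np.allclose(np.expm1(np.log1p(sale)), sale)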
code
34147377/cell_32
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_39.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_4.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "image_output_3.png", "image_output_29.png", "image_output_43.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
corr_matrix = train_data[num_cols].corr()
corr_matrix['SalePrice'].sort_values(ascending=False)
code
34147377/cell_51
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:,i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:,i].dropna(), rug=False, hist=False, kde_kws={'bw':0.1})
    plt.xlabel(num_data.columns[i])
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
fig = plt.figure(figsize=(12,18))
for i in range(len(num_data.columns)):
    fig.add_subplot(9, 4, i+1)
    sns.scatterplot(num_data.iloc[:, i], num_data['SalePrice'])
plt.tight_layout()
plt.show()
corr_matrix = train_data[num_cols].corr()
corr_matrix['SalePrice'].sort_values(ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
sns.lmplot('Age', 'SalePrice', data=model_data, height=8)
plt.title('Age vs SalePrice')
plt.show()
code
34147377/cell_59
[ "text_html_output_1.png" ]
from scipy.special import boxcox1p
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
test_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
train_data = train_data.drop(train_data[train_data['Id'] == 524].index)
train_data = train_data.drop(train_data[train_data['Id'] == 1299].index)
model_data = pd.concat([train_data, test_data], axis=0, sort=None, ignore_index=True)
model_data.shape
numeric_feats = model_data.dtypes[model_data.dtypes != 'object'].index
numeric_feats = [elem for elem in numeric_feats if elem not in ('Id', 'SalePrice')]
from scipy.special import boxcox1p
lam = 0.15
for feat in numeric_feats:
    model_data[feat] = boxcox1p(model_data[feat], lam)
model_data.head()
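# Property check (added illustration, not part of the original cell): boxcox1p
# generalizes log1p; at lmbda = 0 it reduces exactly to log(1 + x).
import numpy as np
assert np.isclose(boxcox1p(np.e - 1, 0.0), 1.0)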
code
34147377/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
train_data.info()
code
34147377/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10, 5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
code
34147377/cell_35
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(10,5))
sns.distplot(np.log1p(train_data['SalePrice']))
plt.tight_layout()
plt.show()
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
for i in range(len(cat_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.countplot(cat_data.iloc[:,i].dropna())
    plt.xlabel(cat_data.columns[i])
    plt.xticks(rotation=60)
for i in range(len(num_data.columns)):
    f, ax = plt.subplots(figsize=(7, 4))
    fig = sns.distplot(num_data.iloc[:,i].dropna(), rug=False, hist=False, kde_kws={'bw':0.1})
    plt.xlabel(num_data.columns[i])
fig = plt.figure(figsize=(12,18))
for i in range(len(num_data.columns)):
    fig.add_subplot(9, 4, i+1)
    sns.scatterplot(num_data.iloc[:, i], num_data['SalePrice'])
plt.tight_layout()
plt.show()
corr_matrix = train_data[num_cols].corr()
corr_matrix['SalePrice'].sort_values(ascending=False)
sns.lmplot('GrLivArea', 'SalePrice', data=train_data, height=8)
plt.title('GrLivArea vs SalePrice')
plt.show()
code
34147377/cell_14
[ "text_plain_output_1.png" ]
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10,5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
from scipy import stats
stats.probplot(train_data['SalePrice'], plot=plt)
code
34147377/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test_data.shape
test_data.info()
code
34147377/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
skew_dict = {}
for cols in num_cols:
    skew_dict[cols] = {'Skewness': train_data[cols].skew()}
skew_df = pd.DataFrame(skew_dict).transpose()
skew_df.columns = ['Skewness']
skew_df.sort_values(by=['Skewness'], ascending=False)
code
34147377/cell_37
[ "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
cat_data = train_data.select_dtypes(include='object')
cat_cols = cat_data.columns
num_data = train_data.select_dtypes(exclude='object')
num_cols = num_data.columns
num_to_cat_cols = ['MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond']
num_cols = [i for i in num_cols if not i in num_to_cat_cols]
num_cols = [i for i in num_cols if not i in ['Id']]
num_data = num_data.drop(['Id', 'MSSubClass', 'MoSold', 'YrSold', 'OverallQual', 'OverallCond'], axis=1)
train_data[(train_data['GrLivArea'] > 4000) & (train_data['SalePrice'] < 300000)]
code
34147377/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.shape
fig = plt.figure(figsize=(10, 5))
sns.distplot(train_data['SalePrice'])
plt.tight_layout()
plt.show()
code
34147377/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.head()
code
106191058/cell_8
[ "image_output_1.png" ]
import json
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
    token_info_response = json.loads(response.text)
token_info_response
url = base_url + f'/getTopTokenHolders/{token_address}?apiKey=freekey&limit=100'
response = requests.get(url)
if response.status_code == 200:
    token_holders_response = json.loads(response.text)
token_holders_df = pd.DataFrame(token_holders_response['holders'])
n_top_holders_list = ['1', '2', '3', '5', '10', '25', '100']
shares_list = [round(token_holders_df['share'].values[:int(n)].sum(), 2) for n in n_top_holders_list]
color = sns.color_palette()  # assumed default palette; 'color' was undefined in this cell extract
plt.figure(figsize=(12, 6), dpi=200)
ax = sns.barplot(x=n_top_holders_list, y=shares_list, alpha=0.8, color=color[3])
ax.bar_label(ax.containers[0])
plt.xlabel('Top N Wallets', fontsize=12)
plt.ylabel('Cumulative percentage of Token share', fontsize=12)
plt.title('Percentage of tokens hodl by top N wallets', fontsize=16)
plt.show()
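# Equivalent cumulative view (added sketch; assumes the endpoint returns
# holders already sorted by descending share, as the per-N sums above do):
# cum_share = token_holders_df['share'].cumsum()
# cum_share.iloc[[0, 1, 2, 4, 9, 24, 99]]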
code
106191058/cell_5
[ "text_plain_output_1.png" ]
import json
import requests
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
    token_info_response = json.loads(response.text)
token_info_response
code
106194498/cell_2
[ "text_plain_output_1.png" ]
a = 'Functions'
len(a)
code
106194498/cell_5
[ "text_plain_output_1.png" ]
def my_first_function():
    pass
code
17096880/cell_13
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
import numpy as np

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
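# Toy sanity check (added illustration, not part of the original cell): with a
# perfect one-hot prediction the clipped log loss should be numerically ~0.
toy_actual = np.array([0, 1, 2])
toy_pred = np.eye(3)
print('toy logloss: %0.6f' % multiclass_logloss(toy_actual, toy_pred))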
code
17096880/cell_25
[ "text_plain_output_1.png" ]
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
# assumed hold-out split (not in this cell extract); the code below references
# xtrain/xvalid/ytrain/yvalid, so a stratified split is recreated here
xtrain, xvalid, ytrain, yvalid = model_selection.train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1)
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
clf = SVC(C=1.0, probability=True)
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_tfv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_tfv.tocsc())
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_ctv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_ctv.tocsc())
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_svd, ytrain)
predictions = clf.predict_proba(xvalid_svd)
mll_scorer = metrics.make_scorer(multiclass_logloss, greater_is_better=False, needs_proba=True)
svd = TruncatedSVD()
scl = preprocessing.StandardScaler()
lr_model = LogisticRegression()
clf = pipeline.Pipeline([('svd', svd), ('scl', scl), ('lr', lr_model)])
param_grid = {'svd__n_components': [120, 180], 'lr__C': [0.1, 1.0, 10], 'lr__penalty': ['l1', 'l2']}
model = GridSearchCV(estimator=clf, param_grid=param_grid, scoring=mll_scorer, verbose=1, n_jobs=-1, iid=True, refit=True, cv=2)
model.fit(xtrain_tfv, ytrain)
print('Best score: %0.3f' % model.best_score_)
print('Best parameters set:')
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
    print('\t%s: %r' % (param_name, best_parameters[param_name]))
code
17096880/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()
code
17096880/cell_20
[ "text_plain_output_1.png" ]
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
# assumed hold-out split (not in this cell extract); the code below references
# xtrain/xvalid/ytrain/yvalid, so a stratified split is recreated here
xtrain, xvalid, ytrain, yvalid = model_selection.train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1)
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
clf = SVC(C=1.0, probability=True)
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_tfv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_tfv.tocsc())
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_ctv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_ctv.tocsc())
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_svd, ytrain)
predictions = clf.predict_proba(xvalid_svd)
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
code
17096880/cell_19
[ "text_plain_output_1.png" ]
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
# assumed hold-out split (not in this cell extract); the code below references
# xtrain/xvalid/ytrain/yvalid, so a stratified split is recreated here
xtrain, xvalid, ytrain, yvalid = model_selection.train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1)
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
clf = SVC(C=1.0, probability=True)
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_tfv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_tfv.tocsc())
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_ctv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_ctv.tocsc())
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
code
17096880/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords
import os
import pandas as pd
import numpy as np
import xgboost as xgb
from tqdm import tqdm
from sklearn.svm import SVC
from keras.models import Sequential
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from keras.layers import GlobalMaxPooling1D, Conv1D, MaxPooling1D, Flatten, Bidirectional, SpatialDropout1D
from keras.preprocessing import sequence, text
from keras.callbacks import EarlyStopping
from nltk import word_tokenize
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
import os
print(os.listdir('../input'))
code
17096880/cell_18
[ "text_plain_output_1.png" ]
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
# assumed hold-out split (not in this cell extract); the code below references
# xtrain/xvalid/ytrain/yvalid, so a stratified split is recreated here
xtrain, xvalid, ytrain, yvalid = model_selection.train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1)
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
clf = SVC(C=1.0, probability=True)
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_tfv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_tfv.tocsc())
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
code
17096880/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.author.nunique()

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
# assumed hold-out split (not in this cell extract); the code below references
# xtrain/xvalid/ytrain/yvalid, so a stratified split is recreated here
xtrain, xvalid, ytrain, yvalid = model_selection.train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1)
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
ctv = CountVectorizer(analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
clf = SVC(C=1.0, probability=True)
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
code
17096880/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sample_submission.csv')
train.head(3)
code
17096880/cell_10
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np

def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota

tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english')
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
print('logloss: %0.3f ' % multiclass_logloss(yvalid, predictions))
code
122264561/cell_42
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plt calls below
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
plt.figure(figsize=(10, 6), dpi=200)
sns.countplot(x='type_sentiment', data=df_react)
plt.xlabel('Reaction_Type')
plt.ylabel('Count')
plt.title('Number Of Diff Reactions')
plt.savefig('Number Of Diff Reactions2.jpeg')
code
122264561/cell_63
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content = df_content.fillna(0)  # fillna returns a copy, so keep the result
df_content['type_sentiment']
code
122264561/cell_21
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_content
code
122264561/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_user
code
122264561/cell_83
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_user
code
122264561/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
df_react[df_react['Type_score'].isnull()].isnull().sum()
code
122264561/cell_87
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_profile = df_profile.drop('Unnamed: 0', axis=1)
type(df_profile['Interests'][2])
code