path (string, length 13-17) | screenshot_names (sequence, length 1-873) | code (string, length 0-40.4k) | cell_type (string, 1 class) |
---|---|---|---|
18139612/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
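# BAC, C, GS, JPM, MS, WFC are per-bank price DataFrames loaded in an earlier (elided) cell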
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.columns.names = ['bank ticker', 'stock info']
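# xs() takes a cross-section: the 'Close' column of every bank from the second index level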
Banks_Stock.xs(key='Close', axis=1, level='stock info').max()
returns = pd.DataFrame()
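# pct_change() computes the daily simple return (P_t - P_{t-1}) / P_{t-1}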
for tick in tickers:
returns[tick + 'return'] = Banks_Stock[tick]['Close'].pct_change()
returns.head() | code |
18139612/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.columns.names = ['bank ticker', 'stock info']
Banks_Stock.xs(key='Close', axis=1, level='stock info').max()
returns = pd.DataFrame()
for tick in tickers:
returns[tick + 'return'] = Banks_Stock[tick]['Close'].pct_change()
import seaborn as sns
sns.pairplot(returns[1:]) | code |
18139612/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.head() | code |
18139612/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.columns.names = ['bank ticker', 'stock info']
Banks_Stock.xs(key='Close', axis=1, level='stock info').max()
returns = pd.DataFrame()
for tick in tickers:
returns[tick + 'return'] = Banks_Stock[tick]['Close'].pct_change()
returns.idxmax() | code |
18139612/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.columns.names = ['bank ticker', 'stock info']
Banks_Stock.xs(key='Close', axis=1, level='stock info').max()
returns = pd.DataFrame()
for tick in tickers:
returns[tick + 'return'] = Banks_Stock[tick]['Close'].pct_change()
returns.idxmax()
returns.idxmin()
returns.std() | code |
18139612/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
tickers = ['BAC', 'C', 'GS', 'JPM', 'MS', 'WFC']
Banks_Stock = pd.concat([BAC, C, GS, JPM, MS, WFC], axis=1, keys=tickers)
Banks_Stock.columns.names = ['bank ticker', 'stock info']
Banks_Stock.xs(key='Close', axis=1, level='stock info').max() | code |
105190066/cell_25 | [
"text_plain_output_1.png"
] | from scipy import stats
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show()
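# log1p tames the right skew of SalePrice; log(1 + x) stays defined at x = 0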
df_train["SalePrice"] = np.log1p(df_train["SalePrice"])
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show()
y_train = df_train['SalePrice']
df = pd.concat((df_train, df_test)).reset_index(drop=True)
df = df.drop(['Id', 'SalePrice'], axis=1)
df_1 = df.copy()
for feature in ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrType', 'MSSubClass']:
df_1[feature] = df_1[feature].fillna('None')
df_2 = df_1.copy()
for feature in ['GarageYrBlt', 'GarageArea', 'GarageCars', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'MasVnrArea']:
df_2[feature] = df_2[feature].fillna(0)
df_3 = df_2.copy()
for feature in ['MSZoning', 'Electrical', 'KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType']:
df_3[feature] = df_3[feature].fillna(df_3[feature].mode()[0])
df_4 = df_3.copy()
df_4['Functional'] = df_4['Functional'].fillna('Typ')
df_5 = df_4.copy()
df_5 = df_5.drop(['Utilities'], axis=1)
df_6 = df_5.copy()
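# impute LotFrontage with the median frontage of the same neighborhood instead of a global median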
df_6['LotFrontage'] = df_6.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
df_7 = df_6.copy()
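# one-hot encode the categoricals; train and test were concatenated above, so the dummy columns stay aligned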
df_7 = pd.get_dummies(df_7)
df_8 = df_7.copy()
numerical_features = df_8.dtypes[df_8.dtypes != 'object'].index
df_train = df_8[:df_train.shape[0]]
df_test = df_8[df_train.shape[0]:]
lr = LinearRegression()
lr.fit(df_train, y_train)
y_pred = lr.predict(df_test)
y_pred | code |
105190066/cell_23 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
y_train = df_train['SalePrice']
df = pd.concat((df_train, df_test)).reset_index(drop=True)
df = df.drop(['Id', 'SalePrice'], axis=1)
df_1 = df.copy()
for feature in ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrType', 'MSSubClass']:
df_1[feature] = df_1[feature].fillna('None')
df_2 = df_1.copy()
for feature in ['GarageYrBlt', 'GarageArea', 'GarageCars', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'MasVnrArea']:
df_2[feature] = df_2[feature].fillna(0)
df_3 = df_2.copy()
for feature in ['MSZoning', 'Electrical', 'KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType']:
df_3[feature] = df_3[feature].fillna(df_3[feature].mode()[0])
df_4 = df_3.copy()
df_4['Functional'] = df_4['Functional'].fillna('Typ')
df_5 = df_4.copy()
df_5 = df_5.drop(['Utilities'], axis=1)
df_6 = df_5.copy()
df_6['LotFrontage'] = df_6.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
df_7 = df_6.copy()
df_7 = pd.get_dummies(df_7)
df_8 = df_7.copy()
numerical_features = df_8.dtypes[df_8.dtypes != 'object'].index
df_train = df_8[:df_train.shape[0]]
df_test = df_8[df_train.shape[0]:]
lr = LinearRegression()
lr.fit(df_train, y_train) | code |
105190066/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105190066/cell_7 | [
"image_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show() | code |
105190066/cell_8 | [
"text_plain_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show()
df_train['SalePrice'] = np.log1p(df_train['SalePrice'])
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show() | code |
105190066/cell_24 | [
"image_output_1.png"
] | from scipy import stats
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show()
df_train["SalePrice"] = np.log1p(df_train["SalePrice"])
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
plt.show()
y_train = df_train['SalePrice']
df = pd.concat((df_train, df_test)).reset_index(drop=True)
df = df.drop(['Id', 'SalePrice'], axis=1)
df_1 = df.copy()
for feature in ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrType', 'MSSubClass']:
df_1[feature] = df_1[feature].fillna('None')
df_2 = df_1.copy()
for feature in ['GarageYrBlt', 'GarageArea', 'GarageCars', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'MasVnrArea']:
df_2[feature] = df_2[feature].fillna(0)
df_3 = df_2.copy()
for feature in ['MSZoning', 'Electrical', 'KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType']:
df_3[feature] = df_3[feature].fillna(df_3[feature].mode()[0])
df_4 = df_3.copy()
df_4['Functional'] = df_4['Functional'].fillna('Typ')
df_5 = df_4.copy()
df_5 = df_5.drop(['Utilities'], axis=1)
df_6 = df_5.copy()
df_6['LotFrontage'] = df_6.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
df_7 = df_6.copy()
df_7 = pd.get_dummies(df_7)
df_8 = df_7.copy()
numerical_features = df_8.dtypes[df_8.dtypes != 'object'].index
df_train = df_8[:df_train.shape[0]]
df_test = df_8[df_train.shape[0]:]
lr = LinearRegression()
lr.fit(df_train, y_train)
plt.scatter(lr.predict(df_train), y_train)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show() | code |
105194794/cell_9 | [
"text_plain_output_1.png"
] | liability = 14589
asset = 4000
liability = 4000
asset = 4000
liability = 4000
asset = 4000
if liability >= asset:
    print('asset deficiency')
else:
print('going good') | code |
105194794/cell_11 | [
"text_plain_output_1.png"
] | liability = 14589
asset = 4000
liability = 4000
asset = 4000
liability = 4000
asset = 4000
liability = 14589
asset = 4000
if liability <= asset:
    print('going good')
else:
print('asset deficiency') | code |
105194794/cell_7 | [
"text_plain_output_1.png"
] | liability = 14589
asset = 4000
if liability >= asset:
    print('asset deficiency')
else:
print('going good') | code |
105194794/cell_8 | [
"text_plain_output_1.png"
] | liability = 14589
asset = 4000
liability = 4000
asset = 4000
if liability > asset:
    print('asset deficiency')
else:
print('going good') | code |
105194794/cell_3 | [
"text_plain_output_1.png"
] | a = 49
b = 2
if a % b == 1:
print('odd number')
else:
print('even number') | code |
105194794/cell_5 | [
"text_plain_output_1.png"
] | a = 49
b = 2
a = 49
b = 2
if a % b == 1:
    print('odd number')
else:
    print('even number')
2030468/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/harddrive.csv', usecols=['failure', 'smart_1_normalized'], nrows=100000)
x = df['smart_1_normalized']
y = df['failure']
x = sm.add_constant(x)
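# Binomial family with the default logit link fits a logistic regression of failure probability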
model = sm.GLM(y, x, family=sm.families.Binomial()).fit()
model.summary() | code |
2030468/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/harddrive.csv', usecols=['failure', 'smart_1_normalized'], nrows=100000)
sns.regplot(x=df['smart_1_normalized'], y=df['failure'], line_kws={'color': 'k', 'lw': 1}) | code |
2030468/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/harddrive.csv', usecols=['failure', 'smart_1_normalized'], nrows=100000)
df.head() | code |
2030468/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm | code |
2030468/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/harddrive.csv', usecols=['failure', 'smart_1_normalized'], nrows=100000)
x = df['smart_1_normalized']
y = df['failure']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Binomial()).fit()
model.summary()
(model.null_deviance, model.deviance) | code |
327848/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
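# bin ages into decade-wide intervals (0, 10], (10, 20], ..., (70, 80]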
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count() | code |
327848/cell_4 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df['Survived'].mean() | code |
327848/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
print(class_sex_grouping['Survived']) | code |
327848/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import sklearn.ensemble as ske
import tensorflow as tf
from tensorflow.contrib import skflow | code |
327848/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
class_sex_grouping['Survived'].plot.bar() | code |
327848/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
age_grouping['Survived'].plot.bar() | code |
327848/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.head() | code |
327848/cell_12 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count() | code |
327848/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean() | code |
2044953/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
class_1_pass = train_df[train_df['Pclass'] == 1]
print('1st Class Passengers :')
print(class_1_pass['Fare'].count())
class_1_pass_sur = class_1_pass[class_1_pass['Survived'] == 1]
print('Survivor of 1st class passengers: ')
print(class_1_pass_sur['Fare'].count())
class_1_pass_female = class_1_pass[class_1_pass['Sex'] == 'female']
print('No of female in 1st class : ')
print(class_1_pass_female['PassengerId'].count())
class_1_pass_sur_female = class_1_pass_sur[class_1_pass_sur['Sex'] == 'female']
print('Female survivor of 1st class passengers :')
print(class_1_pass_sur_female['PassengerId'].count())
class_1_female_sur_rate = class_1_pass_sur_female['PassengerId'].count() / class_1_pass_female['PassengerId'].count()
print('1st class female survivor rate :')
print(class_1_female_sur_rate)
class_1_pass_child = class_1_pass[class_1_pass['Age'] < 15]
print('No of children in 1st class: ')
print(class_1_pass_child['PassengerId'].count())
class_1_pass_sur_child = class_1_pass_sur[class_1_pass_sur['Age'] < 15]
print('Children survivor of 1st class passengers')
print(class_1_pass_sur_child['PassengerId'].count())
class_1_child_sur_rate = class_1_pass_sur_child['PassengerId'].count() / class_1_pass_child['PassengerId'].count()
print('1st class child survivor rate :')
print(class_1_child_sur_rate) | code |
2044953/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
class_3_pass = train_df[train_df['Pclass'] == 3]
print('3rd Class Passengers :')
print(class_3_pass['PassengerId'].count())
class_3_pass_sur = class_3_pass[class_3_pass['Survived'] == 1]
print('Survivor of 3rd class passengers: ')
print(class_3_pass_sur['PassengerId'].count())
class_3_pass_female = class_3_pass[class_3_pass['Sex'] == 'female']
print('No of female in 3rd class : ')
print(class_3_pass_female['PassengerId'].count())
class_3_pass_sur_female = class_3_pass_sur[class_3_pass_sur['Sex'] == 'female']
print('Female survivor of 3rd class passengers :')
print(class_3_pass_sur_female['PassengerId'].count())
class_3_female_sur_rate = class_3_pass_sur_female['PassengerId'].count() / class_3_pass_female['PassengerId'].count()
print('3rd class female survivor rate :')
print(class_3_female_sur_rate)
class_3_pass_child = class_3_pass[class_3_pass['Age'] < 15]
print('No of children in 3rd class: ')
print(class_3_pass_child['PassengerId'].count())
class_3_pass_sur_child = class_3_pass_sur[class_3_pass_sur['Age'] < 15]
print('Children survivor of 3rd class passengers')
print(class_3_pass_sur_child['PassengerId'].count())
class_3_child_sur_rate = class_3_pass_sur_child['PassengerId'].count() / class_3_pass_child['PassengerId'].count()
print('3rd class child survivor rate :')
print(class_3_child_sur_rate) | code |
2044953/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
X_train.head() | code |
2044953/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB | code |
2044953/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer
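# note: Imputer was removed in scikit-learn 0.22; sklearn.impute.SimpleImputer is its replacement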
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
logreg = LogisticRegression()
from sklearn.preprocessing import Imputer
my_imputer = Imputer()
X_train = my_imputer.fit_transform(X_train)
X_test = my_imputer.fit_transform(X_test)
logreg.fit(X_train, Y_train.values.ravel())
Y_pred = logreg.predict(X_test)
logreg.score(X_train, Y_train) | code |
2044953/cell_3 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
train_df.info()
train_df.describe() | code |
2044953/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_y = pd.read_csv('../input/gender_submission.csv')
X_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_train['Sex'])
X_train = X_train.drop(['Sex'], axis=1)
X_train = X_train.join(sex)
embarked = pd.get_dummies(X_train['Embarked'])
X_train = X_train.drop(['Embarked'], axis=1)
X_train = X_train.join(embarked)
Y_train = train_df.drop(['PassengerId', 'Ticket', 'Cabin', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Name'], axis=1)
X_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
sex = pd.get_dummies(X_test['Sex'])
X_test = X_test.drop(['Sex'], axis=1)
X_test = X_test.join(sex)
embarked = pd.get_dummies(X_test['Embarked'])
X_test = X_test.drop(['Embarked'], axis=1)
X_test = X_test.join(embarked)
Y_test = test_y.drop('PassengerId', axis=1).copy()
class_2_pass = train_df[train_df['Pclass'] == 2]
print('2nd Class Passengers :')
print(class_2_pass['Fare'].count())
class_2_pass_sur = class_2_pass[class_2_pass['Survived'] == 1]
print('Survivor of 2nd class passengers: ')
print(class_2_pass_sur['Fare'].count())
class_2_pass_female = class_2_pass[class_2_pass['Sex'] == 'female']
print('No of female in 2nd class : ')
print(class_2_pass_female['PassengerId'].count())
class_2_pass_sur_female = class_2_pass_sur[class_2_pass_sur['Sex'] == 'female']
print('Female survivor of 2nd class passengers :')
print(class_2_pass_sur_female['PassengerId'].count())
class_2_female_sur_rate = class_2_pass_sur_female['PassengerId'].count() / class_2_pass_female['PassengerId'].count()
print('2nd class female survivor rate :')
print(class_2_female_sur_rate)
class_2_pass_child = class_2_pass[class_2_pass['Age'] < 15]
print('No of children in 2nd class: ')
print(class_2_pass_child['PassengerId'].count())
class_2_pass_sur_child = class_2_pass_sur[class_2_pass_sur['Age'] < 15]
print('Children survivor of 2nd class passengers')
print(class_2_pass_sur_child['PassengerId'].count())
class_2_child_sur_rate = class_2_pass_sur_child['PassengerId'].count() / class_2_pass_child['PassengerId'].count()
print('2nd class child survivor rate :')
print(class_2_child_sur_rate) | code |
16168012/cell_21 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
id_feature = 'molecule_name'
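# the target is whichever column train has and test lacks (set difference)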
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')
X = molecule_train[selected_features]
y = molecule_train[target_feature]
kfold = KFold(n_splits=N_SPLITS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
fold = 0
r2_scores = []
mse_scores = []
lin_reg = LinearRegression()
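# out-of-fold validation: fit on the in-fold rows, score on the held-out fold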
for in_index, oof_index in kfold.split(X, y):
fold += 1
X_in, X_oof = (X.loc[in_index], X.loc[oof_index])
y_in, y_oof = (y.loc[in_index], y.loc[oof_index])
lin_reg.fit(X_in, y_in)
y_pred = lin_reg.predict(X_oof)
r2 = r2_score(y_oof, y_pred)
r2_scores.append(r2)
mse_score = mean_squared_error(y_oof, y_pred)
mse_scores.append(mse_score)
plt.scatter(y_oof, y_pred)  # scatter, not a line plot: the out-of-fold points are unordered
plt.show() | code |
16168012/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
molecule_train.head() | code |
16168012/cell_25 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')
X = molecule_train[selected_features]
y = molecule_train[target_feature]
kfold = KFold(n_splits=N_SPLITS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
fold = 0
r2_scores = []
mse_scores = []
lin_reg = LinearRegression()
for in_index, oof_index in kfold.split(X, y):
fold += 1
X_in, X_oof = (X.loc[in_index], X.loc[oof_index])
y_in, y_oof = (y.loc[in_index], y.loc[oof_index])
lin_reg.fit(X_in, y_in)
y_pred = lin_reg.predict(X_oof)
r2 = r2_score(y_oof, y_pred)
r2_scores.append(r2)
mse_score = mean_squared_error(y_oof, y_pred)
mse_scores.append(mse_score)
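# note: the sliced context drops the feature-engineering cells, so selected_features ends up empty here;
# the full notebook also fills molecule_test['potential_energy'] with predictions before the KDE plot below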
plt.figure(figsize=FIGSIZE)
molecule_train['potential_energy'].plot(kind='kde')
molecule_test['potential_energy'].plot(kind='kde')
plt.show() | code |
16168012/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')
X = molecule_train[selected_features]
y = molecule_train[target_feature]
kfold = KFold(n_splits=N_SPLITS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
fold = 0
r2_scores = []
mse_scores = []
lin_reg = LinearRegression()
for in_index, oof_index in kfold.split(X, y):
fold += 1
print('- Training Fold: ({}/{})'.format(fold, N_SPLITS))
X_in, X_oof = (X.loc[in_index], X.loc[oof_index])
y_in, y_oof = (y.loc[in_index], y.loc[oof_index])
lin_reg.fit(X_in, y_in)
y_pred = lin_reg.predict(X_oof)
r2 = r2_score(y_oof, y_pred)
r2_scores.append(r2)
mse_score = mean_squared_error(y_oof, y_pred)
mse_scores.append(mse_score)
print('\nkFold Validation Results:')
print(' * Average Variance Score (R2): \t{:.4f}'.format(np.mean(r2_scores)))
print(' * Average Mean squared error (MSE): \t{:.4f}'.format(np.mean(mse_scores))) | code |
16168012/cell_2 | [
"text_plain_output_1.png"
] | import os
import warnings
import warnings
import numpy as np
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression
import os
print(os.listdir('../input')) | code |
16168012/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')
print('Selected Features: \t{}'.format(selected_features))
print('Target Feature: \t{}'.format(target_feature))
print('Id Feature: \t\t{}'.format(id_feature)) | code |
16168012/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
OUTPUT_FILE = 'potential_energy_upd'
RANDOM_STATE = 123
N_SPLITS = 3
SHUFFLE = True
VERBOSE = False
DATA_PATH = '../input'
def csv_path(dataset='train', data_path=DATA_PATH):
"""
"""
return '{}/{}.csv'.format(data_path, dataset)
def read_data(dataset='train', data_path=DATA_PATH):
"""
"""
index_col = None
index_type = ['train', 'test']
if dataset in index_type:
index_col = 'id'
data_path = csv_path(dataset, data_path=data_path)
return pd.read_csv(data_path, index_col=index_col)
train = read_data('train')
test = read_data('test')
molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})
structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()
if FREE_MEMORY:
del train
del test
molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')
potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
del potential_energy
del structures
id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')
potential_energy_upd = pd.concat([molecule_train[[id_feature, target_feature]], molecule_test[[id_feature, target_feature]]], ignore_index=True)
potential_energy_upd = potential_energy_upd.sort_values(id_feature)
potential_energy_upd.reset_index(drop=True, inplace=True)
potential_energy_upd.head() | code |
16166680/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
print('Number of numerical variables: ', len(num_vars))
data[num_vars].head() | code |
16166680/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
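# heuristic: numeric columns with fewer than 20 distinct values are treated as discrete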
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
print('Number of discrete variables: ', len(discrete_vars)) | code |
16166680/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
print(data.shape)
data.head() | code |
16166680/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
def analyse_year_vars(df, var):
df = df.copy()
df[var] = df['YrSold'] - df[var]
for var in year_vars:
if var != 'YrSold':
analyse_year_vars(data, var)
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
def analyse_discrete(df, var):
df = df.copy()
for var in discrete_vars:
analyse_discrete(data, var)
cont_vars = [var for var in num_vars if var not in discrete_vars + year_vars + ['Id']]
def analyse_continous(df, var):
df = df.copy()
df[var].hist(bins=20)
plt.ylabel('Number of houses')
plt.xlabel(var)
plt.title(var)
plt.show()
for var in cont_vars:
analyse_continous(data, var) | code |
16166680/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
data.groupby('YrSold')['SalePrice'].median().plot()
plt.ylabel('Median House Price')
plt.title('Change in House price with the years') | code |
16166680/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
data[discrete_vars].head() | code |
16166680/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
for var in vars_with_na:
print(var, np.round(data[var].isnull().mean(), 3), ' % missing values') | code |
16166680/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
for var in year_vars:
print(var, data[var].unique())
print() | code |
16166680/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
cont_vars = [var for var in num_vars if var not in discrete_vars + year_vars + ['Id']]
data[cont_vars].head() | code |
16166680/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
def analyse_year_vars(df, var):
df = df.copy()
df[var] = df['YrSold'] - df[var]
for var in year_vars:
if var != 'YrSold':
analyse_year_vars(data, var)
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
def analyse_discrete(df, var):
df = df.copy()
df.groupby(var)['SalePrice'].median().plot.bar()
plt.title(var)
plt.ylabel('SalePrice')
plt.show()
for var in discrete_vars:
analyse_discrete(data, var) | code |
16166680/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
print('Number of House Id labels: ', len(data.Id.unique()))
print('Number of Houses in the Dataset: ', len(data)) | code |
16166680/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars | code |
16166680/cell_35 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
def analyse_year_vars(df, var):
df = df.copy()
df[var] = df['YrSold'] - df[var]
for var in year_vars:
if var != 'YrSold':
analyse_year_vars(data, var)
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
def analyse_discrete(df, var):
df = df.copy()
for var in discrete_vars:
analyse_discrete(data, var)
cont_vars = [var for var in num_vars if var not in discrete_vars + year_vars + ['Id']]
def analyse_continous(df, var):
df = df.copy()
for var in cont_vars:
analyse_continous(data, var)
def analyse_transformed_continous(df, var):
df = df.copy()
df[var] = np.log(df[var])
df[var].hist(bins=20)
plt.ylabel('Number of houses')
plt.xlabel(var)
plt.title(var)
plt.show()
for var in cont_vars:
analyse_transformed_continous(data, var) | code |
16166680/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
discrete_vars = [var for var in num_vars if len(data[var].unique()) < 20 and var not in year_vars + ['Id']]
cont_vars = [var for var in num_vars if var not in discrete_vars + year_vars + ['Id']]
print('Number of continuous variables: ', len(cont_vars)) | code |
16166680/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
for var in vars_with_na:
analyse_na_value(data, var)
num_vars = [var for var in data.columns if data[var].dtypes != 'O']
year_vars = [var for var in num_vars if 'Yr' in var or 'Year' in var]
year_vars
def analyse_year_vars(df, var):
df = df.copy()
df[var] = df['YrSold'] - df[var]
plt.scatter(df[var], df['SalePrice'])
plt.ylabel('SalePrice')
plt.xlabel(var)
plt.show()
for var in year_vars:
if var != 'YrSold':
analyse_year_vars(data, var) | code |
16166680/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.pandas.set_option('display.max_columns', None)
data = pd.read_csv('houseprice.csv')
vars_with_na = [var for var in data.columns if data[var].isnull().sum() > 1]
def analyse_na_value(df, var):
df = df.copy()
df[var] = np.where(df[var].isnull(), 1, 0)
df.groupby(var)['SalePrice'].median().plot.bar()
plt.title(var)
plt.show()
for var in vars_with_na:
analyse_na_value(data, var) | code |
88093938/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes | code |
88093938/cell_9 | [
"image_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE']
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.head(5) | code |
88093938/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data['club_member_status'].value_counts() | code |
88093938/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = pd.to_datetime(trans_data['t_dat'])
trans_data['year_trans'] = trans_data['t_dat'].dt.year  # assumed: rebuilds the year column used below (its defining cell was elided)
trans_data.dtypes
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sample_trans_data.info() | code |
88093938/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE']
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
sns.set_style('whitegrid')
customers_data_new['age'].plot(kind='hist') | code |
88093938/cell_6 | [
"image_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE']
customers_data_new.head(5) | code |
88093938/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = pd.to_datetime(trans_data['t_dat'])
trans_data['year_trans'] = trans_data['t_dat'].dt.year  # assumed: rebuilds the year column used below (its defining cell was elided)
trans_data.dtypes
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum() | code |
88093938/cell_39 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans is used below but never created in this record; assumed derivation from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data.dtypes
# copy the filtered slice so the in-place drops below do not raise SettingWithCopyWarning
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sns.set_style('whitegrid')
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
purchases_2019 = sample_trans_data.merge(customers_data_new, how='left', on='customer_id')
customers_temp = purchases_2019.groupby(['age_group'])['customer_id'].count()
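# note: grouping on a categorical column keeps empty age bands as zero-count groups
# (pandas' long-standing observed=False default), so they still show in the bar plot below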
data_temp_customer = pd.DataFrame({'Group Age': customers_temp.index, 'Customers': customers_temp.values})
data_temp_customer = data_temp_customer.sort_values(['Group Age'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Group Age')
sns.set_color_codes('pastel')
s = sns.barplot(x='Group Age', y='Customers', data=data_temp_customer)
s.set_xticklabels(s.get_xticklabels(), rotation=45)
locs, labels = plt.xticks()
plt.show() | code |
88093938/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans is used below but never created in this record; assumed derivation from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data.dtypes
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum() | code |
88093938/cell_41 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans and day_trans are used below but never created in this record; assumed derivations from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data['day_trans'] = trans_data['t_dat'].dt.day_name()
trans_data.dtypes
# copy the filtered slice so the in-place drops below do not raise SettingWithCopyWarning
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sns.set_style('whitegrid')
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
purchases_2019 = sample_trans_data.merge(customers_data_new, how='left', on='customer_id')
customers_temp = purchases_2019.groupby(['age_group'])['customer_id'].count()
data_temp_customer = pd.DataFrame({
    'Group Age': customers_temp.index,
    'Customers': customers_temp.values
})
data_temp_customer = data_temp_customer.sort_values(['Group Age'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Group Age')
sns.set_color_codes('pastel')
s = sns.barplot(x='Group Age', y='Customers', data=data_temp_customer)
s.set_xticklabels(s.get_xticklabels(), rotation=45)
locs, labels = plt.xticks()
plt.show()
most_age_group_transaction = purchases_2019[purchases_2019['age_group'] == purchases_2019['age_group'].mode()[0]]
customers_temp_most = most_age_group_transaction.groupby(['day_trans'])['customer_id'].count()
data_temp_customer_most = pd.DataFrame({'Day Transaction': customers_temp_most.index, 'Customers': customers_temp_most.values})
data_temp_customer_most = data_temp_customer_most.sort_values(['Customers'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Day Transaction of Most Age Group Customers')
sns.set_color_codes('pastel')
s = sns.barplot(x='Day Transaction', y='Customers', data=data_temp_customer_most)
s.set_xticklabels(s.get_xticklabels())
locs, labels = plt.xticks()
plt.show() | code |
88093938/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data_new = articles_data[['article_id', 'prod_name', 'product_type_name', 'product_group_name']].copy()
articles_data_new.isna().sum()
articles_data_new.info() | code |
88093938/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data_new = articles_data[['article_id', 'prod_name', 'product_type_name', 'product_group_name']].copy()
articles_data_new.isna().sum() | code |
88093938/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans is used below but never created in this record; assumed derivation from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data.dtypes
# copy the filtered slice so the in-place drops below do not raise SettingWithCopyWarning
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True) | code |
88093938/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True) | code |
88093938/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data[['prod_name', 'product_type_name', 'product_group_name']].describe() | code |
88093938/cell_17 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data_new = articles_data[['article_id', 'prod_name', 'product_type_name', 'product_group_name']].copy()
articles_data_new.head(5) | code |
88093938/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
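# quick illustrative check (not part of the original cell): each age lands in a
# right-closed 10-year interval, e.g. 25 -> (20, 30]
print(pd.cut(pd.Series([25, 52, 68]), bins=interval_range_age))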
customers_data_new.head(5) | code |
88093938/cell_43 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans, month_trans and day_trans are used below but never created in this record; assumed derivations from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data['month_trans'] = trans_data['t_dat'].dt.month
trans_data['day_trans'] = trans_data['t_dat'].dt.day_name()
trans_data.dtypes
# copy the filtered slice so the in-place drops below do not raise SettingWithCopyWarning
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sns.set_style('whitegrid')
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
purchases_2019 = sample_trans_data.merge(customers_data_new, how='left', on='customer_id')
customers_temp = purchases_2019.groupby(['age_group'])['customer_id'].count()
data_temp_customer = pd.DataFrame({
    'Group Age': customers_temp.index,
    'Customers': customers_temp.values
})
data_temp_customer = data_temp_customer.sort_values(['Group Age'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Group Age')
sns.set_color_codes('pastel')
s = sns.barplot(x='Group Age', y='Customers', data=data_temp_customer)
s.set_xticklabels(s.get_xticklabels(), rotation=45)
locs, labels = plt.xticks()
plt.show()
# day transactions of the most common age group of customers
most_age_group_transaction = purchases_2019[purchases_2019['age_group'] == purchases_2019['age_group'].mode()[0]]
customers_temp_most = most_age_group_transaction.groupby(['day_trans'])['customer_id'].count()
data_temp_customer_most = pd.DataFrame({
    'Day Transaction': customers_temp_most.index,
    'Customers': customers_temp_most.values
})
data_temp_customer_most = data_temp_customer_most.sort_values(['Customers'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Day Transaction of Most Age Group Customers')
sns.set_color_codes('pastel')
s = sns.barplot(x='Day Transaction', y='Customers', data=data_temp_customer_most)
s.set_xticklabels(s.get_xticklabels())
locs, labels = plt.xticks()
plt.show()
bins = [0, 3, 6, 9, 12]
labels = ['Winter', 'Spring', 'Summer', 'Autumn']
purchases_2019['Seasons'] = pd.cut(purchases_2019['month_trans'], bins=bins, labels=labels)
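# note: these bins are right-closed, so months 1-3 map to Winter, 4-6 to Spring,
# 7-9 to Summer and 10-12 to Autumn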
purchases_2019.head(5) | code |
88093938/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
# year_trans is used below but never created in this record; assumed derivation from t_dat
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data.dtypes
# copy the filtered slice so the in-place drops below do not raise SettingWithCopyWarning
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sample_trans_data.head(5) | code |
88093938/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data.head(5) | code |
88093938/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64[ns]')
trans_data.dtypes | code |
88093938/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.head(5) | code |
88093938/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.info() | code |
88093938/cell_36 | [
"text_plain_output_1.png"
] | import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
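# ages outside (0, 100] or missing fall outside every interval and become NaN here,
# which the isna() check below surfaces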
customers_data_new.isna().sum() | code |
106206518/cell_21 | [
"text_plain_output_1.png"
] | from heapq import nlargest
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
import spacy
text = '"In an attempt to build an AI-ready workforce, Microsoft announced Intelligent Cloud Hub which has been lanched to empower the next generation of students with AI-ready skills. Envisioned as a three-year collaborative program, Intelligent Cloud Hub will support around 100 institutions with AI infrastructure, course content and curriculum, developer support, development tools and give students access to cloud and AI services. As part of the program, the Redmond giant which wants to expand its reach and is planning to build a strong developer ecosystem in India with the program will set up the core AI infrastructure and IoT Hub for the selected campuses. The company will provide AI development tools and Azure AI services such as Microsoft Cognitive Services, Bot Services and Azure Machine Learning.According to Manish Prakash, Country General Manager-PS, Health and Education, Microsoft India, said, With AI being the defining technology of our time, it is transforming lives and industry and the jobs of tomorrow will require a different skillset. This will require more collaborations and training and working with AI. That’s why it has become more critical than ever for educational institutions to integrate new cloud and AI technologies. The program is an attempt to ramp up the institutional set-up and build capabilities among the educators to educate the workforce of tomorrow. The program aims to build up the cognitive skills and in-depth understanding of developing intelligent cloud connected solutions for applications across industry. Earlier in April this year, the company announced Microsoft Professional Program In AI as a learning track open to the public. The program was developed to provide job ready skills to programmers who wanted to hone their skills in AI and data science with a series of online courses which featured hands-on labs and expert instructors as well. This program also included developer-focused AI school that provided a bunch of assets to help build AI skills.'
def textSummarizer(text, percentage):
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
tokens = [token.text for token in doc]
freq_of_word = dict()
for word in doc:
if word.text.lower() not in list(STOP_WORDS):
if word.text.lower() not in punctuation:
                # key frequencies by lowercased text so the sentence-scoring lookup below matches
                if word.text.lower() not in freq_of_word.keys():
                    freq_of_word[word.text.lower()] = 1
                else:
                    freq_of_word[word.text.lower()] += 1
max_freq = max(freq_of_word.values())
for word in freq_of_word.keys():
freq_of_word[word] = freq_of_word[word] / max_freq
sent_tokens = [sent for sent in doc.sents]
sent_scores = dict()
for sent in sent_tokens:
for word in sent:
if word.text.lower() in freq_of_word.keys():
if sent not in sent_scores.keys():
sent_scores[sent] = freq_of_word[word.text.lower()]
else:
sent_scores[sent] += freq_of_word[word.text.lower()]
len_tokens = int(len(sent_tokens) * percentage)
summary = nlargest(n=len_tokens, iterable=sent_scores, key=sent_scores.get)
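    # note: nlargest returns the picked sentences in score order; sorting them by their
    # position in sent_tokens would restore the original document order (optional tweak)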
final_summary = [word.text for word in summary]
summary = ' '.join(final_summary)
return summary
final_summary = textSummarizer(text, 0.2)
print('#' * 50)
print('Summary of the text')
print('Length of summarized text:', len(final_summary))
print('#' * 50)
print()
print(final_summary) | code |
106206518/cell_15 | [
"text_plain_output_1.png"
] | text = '"In an attempt to build an AI-ready workforce, Microsoft announced Intelligent Cloud Hub which has been lanched to empower the next generation of students with AI-ready skills. Envisioned as a three-year collaborative program, Intelligent Cloud Hub will support around 100 institutions with AI infrastructure, course content and curriculum, developer support, development tools and give students access to cloud and AI services. As part of the program, the Redmond giant which wants to expand its reach and is planning to build a strong developer ecosystem in India with the program will set up the core AI infrastructure and IoT Hub for the selected campuses. The company will provide AI development tools and Azure AI services such as Microsoft Cognitive Services, Bot Services and Azure Machine Learning.According to Manish Prakash, Country General Manager-PS, Health and Education, Microsoft India, said, With AI being the defining technology of our time, it is transforming lives and industry and the jobs of tomorrow will require a different skillset. This will require more collaborations and training and working with AI. That’s why it has become more critical than ever for educational institutions to integrate new cloud and AI technologies. The program is an attempt to ramp up the institutional set-up and build capabilities among the educators to educate the workforce of tomorrow. The program aims to build up the cognitive skills and in-depth understanding of developing intelligent cloud connected solutions for applications across industry. Earlier in April this year, the company announced Microsoft Professional Program In AI as a learning track open to the public. The program was developed to provide job ready skills to programmers who wanted to hone their skills in AI and data science with a series of online courses which featured hands-on labs and expert instructors as well. This program also included developer-focused AI school that provided a bunch of assets to help build AI skills.'
print('Length of original text:', len(text)) | code |
72086844/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head() | code |
72086844/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
features.head() | code |
72086844/cell_8 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(features[object_cols])
X_test[object_cols] = ordinal_encoder.transform(test[object_cols])
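# caveat (not in the original cell): OrdinalEncoder.transform raises on categories it
# never saw during fit, so train and test are assumed to share all category values here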
X.head() | code |
50216735/cell_9 | [
"text_plain_output_1.png"
] | X_train | code |
50216735/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.Sentiment.unique() | code |
50216735/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.Sentiment.unique()
from sklearn.feature_extraction.text import TfidfVectorizer
tfv = TfidfVectorizer(min_df=3, max_features=49748, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
train['Phrase'] = train['Phrase'].fillna('')
tfv_matrix = tfv.fit_transform(train['Phrase'])
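# illustrative peek at the learned n-gram vocabulary (sketch, not in the original cell;
# get_feature_names_out needs scikit-learn >= 1.0)
print(tfv.get_feature_names_out()[:10])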
tfv_matrix.shape | code |
50216735/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50216735/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.Sentiment.unique()
from sklearn.feature_extraction.text import TfidfVectorizer
tfv = TfidfVectorizer(min_df=3, max_features=49748, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
train['Phrase'] = train['Phrase'].fillna('')
tfv_matrix = tfv.fit_transform(train['Phrase'])
tfv_matrix.shape
tfv_matrix | code |
50216735/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.Sentiment.unique()
from sklearn.feature_extraction.text import TfidfVectorizer
tfv = TfidfVectorizer(min_df=3, max_features=49748, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
train['Phrase'] = train['Phrase'].fillna('')
tfv_matrix = tfv.fit_transform(train['Phrase'])
tfv_matrix.shape
test['Phrase'] = test['Phrase'].fillna('')
# transform (not fit_transform) keeps the vocabulary learned on the training phrases
tfv_test_matrix = tfv.transform(test['Phrase'])
tfv_test_matrix.shape | code |
50216735/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.head() | code |
50216735/cell_17 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train.Sentiment.unique()
from sklearn.feature_extraction.text import TfidfVectorizer
tfv = TfidfVectorizer(min_df=3, max_features=49748, strip_accents='unicode', analyzer='word', token_pattern='\\w{1,}', ngram_range=(1, 3), stop_words='english')
train['Phrase'] = train['Phrase'].fillna('')
tfv_matrix = tfv.fit_transform(train['Phrase'])
tfv_matrix.shape
MNB = MultinomialNB()
# X_train/Y_train and X_test/Y_test are not defined in this record; an assumed
# train/test split of the TF-IDF matrix is reconstructed here
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(tfv_matrix, train['Sentiment'], test_size=0.2, random_state=42)
MNB.fit(X_train, Y_train)
from sklearn import metrics
predicted = MNB.predict(X_test)
accuracy_score = metrics.accuracy_score(predicted, Y_test)
test['Phrase'] = test['Phrase'].fillna('')
# transform (not fit_transform) keeps the vocabulary learned on the training phrases
tfv_test_matrix = tfv.transform(test['Phrase'])
tfv_test_matrix.shape
submission = MNB.predict(tfv_test_matrix)
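# sketch of writing a submission file (assumed column names, left commented out since
# it is not part of the original cell):
# pd.DataFrame({'PhraseId': test['PhraseId'], 'Sentiment': submission}).to_csv('submission.csv', index=False)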
submission | code |
50216735/cell_14 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB
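# note: X_train/Y_train and X_test/Y_test come from a train/test-split cell not captured in this record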
MNB = MultinomialNB()
MNB.fit(X_train, Y_train)
from sklearn import metrics
predicted = MNB.predict(X_test)
accuracy_score = metrics.accuracy_score(predicted, Y_test)
print(str('{:04.2f}'.format(accuracy_score * 100)) + '%') | code |