Dataset schema:
  path              string    (lengths 13 to 17)
  screenshot_names  sequence  (lengths 1 to 873)
  code              string    (lengths 0 to 40.4k)
  cell_type         string    (1 class)
73074345/cell_13
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, accuracy_score

def score_model(model, X_t=X_train, X_v=X_valid, y_t=y_train, y_v=y_valid):
    model.fit(X_t, y_t)
    preds = model.predict(X_v)
    return mean_absolute_error(y_v, preds)

model_1 = RandomForestRegressor(n_estimators=50, random_state=69)
model_2 = RandomForestRegressor(n_estimators=100, random_state=69)
# criterion 'mae' was renamed to 'absolute_error' in scikit-learn 1.0
model_3 = RandomForestRegressor(n_estimators=100, criterion='absolute_error', random_state=69)
model_4 = RandomForestRegressor(n_estimators=200, min_samples_split=20, random_state=69)
model_5 = RandomForestRegressor(n_estimators=100, max_depth=7, random_state=69)
models = [model_1, model_2, model_3, model_4, model_5]

mae1 = score_model(model_1, X_t=X_train, X_v=X_valid, y_t=y_train, y_v=y_valid)
print('model 1 MAE is:')
print(mae1)
code
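The cell above builds a list of five candidate forests but only scores the first one. A small follow-on sketch (hypothetical, assuming the same X_train/X_valid/y_train/y_valid splits are in scope) looping the score_model helper over the whole models list:

# Hypothetical follow-up: score every candidate model with the helper above.
for i, model in enumerate(models, start=1):
    print(f'model {i} MAE: {score_model(model):.4f}')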
73074345/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X = X_full.copy()
X_test = X_test_full.copy()
X.drop(['target'], axis=1, inplace=True)
X_numeric = X.select_dtypes(exclude=['object'])
X_test_numeric = X_test_full.select_dtypes(exclude=['object'])
X_numeric.head()
code
73074345/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X = X_full.copy()
X_test = X_test_full.copy()
y = X_full.target
y.head()
code
73074345/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73074345/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X = X_full.copy()
X_test = X_test_full.copy()
X.drop(['target'], axis=1, inplace=True)
X.head()
code
73074345/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X = X_full.copy()
X_test = X_test_full.copy()
X_full.head()
code
73074345/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X = X_full.copy()
X_test = X_test_full.copy()
y = X_full.target
X_full.info()
print('*' * 100)
X_full.isna().sum()
code
325098/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))

# sklearn.cross_validation was removed in scikit-learn 0.20; use model_selection
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
df.columns
code
325098/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # was sklearn.cross_validation
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

df = df[df['posteam'] == 'CHI']
df = df[df['DefensiveTeam'] == 'GB']
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]

X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
rf = RandomForestClassifier(n_estimators=1000)
rf.fit(X, y)
test_y = test.pop('is_pass')
rf.score(test, test_y)
code
325098/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # was sklearn.cross_validation
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

df = df[df['posteam'] == 'CHI']
df = df[df['DefensiveTeam'] == 'GB']
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]

X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
rf = RandomForestClassifier(n_estimators=1000)
rf.fit(X, y)
code
1008127/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from fig_code import plot_iris_knn

plot_iris_knn()
code
1008127/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris

data = load_iris()
n_samples, n_features = data.data.shape
x_index = 1
y_index = 2

formatter = plt.FuncFormatter(lambda i, *args: data.target_names[int(i)])
plt.scatter(data.data[:, x_index], data.data[:, y_index], c=data.target,
            cmap=plt.cm.get_cmap('RdYlBu', 3))
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.clim(-0.5, 2.5)
plt.xlabel(data.feature_names[x_index])
plt.ylabel(data.feature_names[y_index])
code
1008127/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1008127/cell_8
[ "text_plain_output_1.png" ]
from sklearn import neighbors, datasets

# (the orphaned colorbar/clim lines carried over from the scatter cell are
# dropped here: without a preceding image they raise a RuntimeError)
data = datasets.load_iris()
X, y = (data.data, data.target)

clf = neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform')
clf.fit(X, y)

X_test = [3, 4, 2, 5]
y_pred = clf.predict([X_test])
print(y_pred)
print(data.target_names[y_pred])
print(data.target_names)
print(clf.predict_proba([X_test]))
code
1008127/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from IPython.display import Image

Image('http://scikit-learn.org/dev/_static/ml_map.png', width=800)
code
1008127/cell_5
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris

data = load_iris()
n_samples, n_features = data.data.shape
print(data.keys())
print(n_samples, n_features)
print(data.data.shape)
print(data.target.shape)
print(data.target_names)
print(data.feature_names)
code
2025162/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)
code
2025162/cell_6
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/kc_house_data.csv')
df.describe()
code
2025162/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

k = 10
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()
code
2025162/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
import statsmodels.api as sm

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

# boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)

X = df[[var, var1, var2, var3, var4, 'view']]
y = df['price']
est = sm.OLS(y, X).fit()
est.summary()
code
2025162/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/kc_house_data.csv')
df.info()
code
2025162/cell_18
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

# boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)

X = df[[var, var1, var2, var3, var4, 'view']]
y = df['price']
# LinearRegression's 'normalize' option was removed in scikit-learn 1.2; for
# unpenalized OLS it did not change predictions, so it is dropped here.
LinReg = LinearRegression()
LinReg.fit(X, y)
print(LinReg.score(X, y))
code
2025162/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
code
2025162/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

# boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
data.plot.scatter(x=var3, y='price', ylim=(0, 8000000))
code
2025162/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

# boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)
data.plot.scatter(x=var4, y='price', ylim=(0, 8000000))
code
2025162/cell_3
[ "image_output_1.png" ]
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from scipy import stats

# the 'scale' function import is dropped: the name is immediately shadowed below
scale = StandardScaler()
code
2025162/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

# boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
data.plot.scatter(x=var2, y='price', ylim=(0, 8000000))
code
2025162/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()

cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]
# seaborn renamed pairplot's 'size' parameter to 'height'
sb.pairplot(cols, height=2.5)
plt.show()
code
2025162/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

# df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
sb.set()
cols = df[['price', 'sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_living15']]

# saleprice correlation matrix
k = 10  # number of variables for heatmap
corrmat = df.corr()
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(df[cols].values.T)
sb.set(font_scale=1.25)
hm = sb.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                yticklabels=cols.values, xticklabels=cols.values)
plt.show()

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
data.plot.scatter(x=var, y='price', ylim=(0, 8000000))
code
2025162/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/kc_house_data.csv')
df.head()
code
329250/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('../input/train_sm/set107_1.jpeg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
code
329250/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
329250/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_sub = pd.read_csv('../input/sample_submission.csv')
# note: '../input/train_sm/' is a directory, not a CSV file, so this read fails
df_train = pd.read_csv('../input/train_sm/')
code
329250/cell_5
[ "text_plain_output_1.png" ]
import cv2

img = cv2.imread('../input/train_sm/set107_1.jpeg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img.shape
code
129036266/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
df = df.drop('OPM remarks', axis=1)
df = df.dropna()
df.sample(3)
code
129036266/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.describe()
code
129036266/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
code
129036266/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
code
129036266/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
df = df.drop('OPM remarks', axis=1)
df = df.dropna()
df['Date Recorded'].values
code
129036266/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
code
129036266/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
df['OPM remarks'].value_counts()
code
129036266/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
df = df.drop('OPM remarks', axis=1)
df = df.dropna()
df['Date Recorded'].values
code
129036266/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.info()
code
129036266/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.sample(2)
df = df.drop('OPM remarks', axis=1)
df = df.dropna()
df.info()
code
129036266/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/real-estate-sales-2001-2020-gl/Real_Estate_Sales_2001-2020_GL.csv')
df.describe(include='all')
code
129040633/cell_4
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_12.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_1.png", "text_plain_output_11.png" ]
import os
import pandas as pd
from skimage import io
from torchvision import datasets, transforms

def fetch_dataset(path, attrs_name='lfw_attributes.txt', images_name='lfw-deepfunneled',
                  dx=80, dy=80, dimx=64, dimy=64):
    if not os.path.exists(images_name):
        os.system('wget http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz -O tmp.tgz')
        os.system('tar xvzf tmp.tgz && rm tmp.tgz')
    assert os.path.exists(images_name)
    if not os.path.exists(attrs_name):
        os.system('wget http://www.cs.columbia.edu/CAVE/databases/pubfig/download/%s' % attrs_name)

    transform = transforms.Compose([transforms.ToPILImage(),
                                    transforms.CenterCrop((dx, dy)),
                                    transforms.Resize((dimx, dimy)),
                                    transforms.ToTensor()])

    df_attrs = pd.read_csv(os.path.join(path, attrs_name), sep='\t', skiprows=1)
    df_attrs = pd.DataFrame(df_attrs.iloc[:, :-1].values, columns=df_attrs.columns[1:])

    photo_ids = []
    for dirpath, dirnames, filenames in os.walk(os.path.join(path, images_name)):
        for fname in filenames:
            if fname.endswith('.jpg'):
                fpath = os.path.join(dirpath, fname)
                photo_id = fname[:-4].replace('_', ' ').split()
                person_id = ' '.join(photo_id[:-1])
                photo_number = int(photo_id[-1])
                photo_ids.append({'person': person_id, 'imagenum': photo_number,
                                  'photo_path': fpath})
    photo_ids = pd.DataFrame(photo_ids)

    df = pd.merge(df_attrs, photo_ids, on=('person', 'imagenum'))
    assert len(df) == len(df_attrs), 'lost some data when merging dataframes'

    all_photos = df['photo_path'].apply(io.imread).apply(transform)
    all_photos = all_photos.values
    all_attrs = df.drop(['photo_path', 'person', 'imagenum'], axis=1)
    return (all_photos, all_attrs)

img_size = 64
path = os.path.abspath('')
data, attrs = fetch_dataset(path=path, dimx=img_size, dimy=img_size)
code
129040633/cell_6
[ "image_output_1.png" ]
import torch
from sklearn.model_selection import train_test_split

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 32

train_photos, val_photos, train_attrs, val_attrs = train_test_split(
    data, attrs, train_size=0.8, shuffle=False)
print('Training input shape: ', train_photos.shape)

data_tr = torch.utils.data.DataLoader(train_photos, batch_size=batch_size)
data_val = torch.utils.data.DataLoader(val_photos, batch_size=batch_size)
code
129040633/cell_2
[ "image_output_1.png" ]
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
code
129040633/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
from torch.autograd import Variable
from torchvision import datasets, transforms
from skimage import io
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
import torch
import matplotlib.pyplot as plt
import os
import pandas as pd
from skimage.transform import resize
from IPython.display import clear_output
code
129040633/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import torch
from sklearn.model_selection import train_test_split

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 32

train_photos, val_photos, train_attrs, val_attrs = train_test_split(
    data, attrs, train_size=0.8, shuffle=False)
data_tr = torch.utils.data.DataLoader(train_photos, batch_size=batch_size)
data_val = torch.utils.data.DataLoader(val_photos, batch_size=batch_size)

plt.figure(figsize=(18, 6))
for i in range(12):
    plt.subplot(2, 6, i + 1)
    plt.axis('off')
    plt.imshow(data_tr.dataset[i].permute(1, 2, 0))
plt.show()
code
129040633/cell_16
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from IPython.display import clear_output
from skimage import io
from sklearn.model_selection import train_test_split
from time import time
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from tqdm.autonotebook import tqdm
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def fetch_dataset(path, attrs_name='lfw_attributes.txt', images_name='lfw-deepfunneled',
                  dx=80, dy=80, dimx=64, dimy=64):
    if not os.path.exists(images_name):
        os.system('wget http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz -O tmp.tgz')
        os.system('tar xvzf tmp.tgz && rm tmp.tgz')
    assert os.path.exists(images_name)
    if not os.path.exists(attrs_name):
        os.system('wget http://www.cs.columbia.edu/CAVE/databases/pubfig/download/%s' % attrs_name)
    transform = transforms.Compose([transforms.ToPILImage(),
                                    transforms.CenterCrop((dx, dy)),
                                    transforms.Resize((dimx, dimy)),
                                    transforms.ToTensor()])
    df_attrs = pd.read_csv(os.path.join(path, attrs_name), sep='\t', skiprows=1)
    df_attrs = pd.DataFrame(df_attrs.iloc[:, :-1].values, columns=df_attrs.columns[1:])
    photo_ids = []
    for dirpath, dirnames, filenames in os.walk(os.path.join(path, images_name)):
        for fname in filenames:
            if fname.endswith('.jpg'):
                fpath = os.path.join(dirpath, fname)
                photo_id = fname[:-4].replace('_', ' ').split()
                person_id = ' '.join(photo_id[:-1])
                photo_number = int(photo_id[-1])
                photo_ids.append({'person': person_id, 'imagenum': photo_number,
                                  'photo_path': fpath})
    photo_ids = pd.DataFrame(photo_ids)
    df = pd.merge(df_attrs, photo_ids, on=('person', 'imagenum'))
    assert len(df) == len(df_attrs), 'lost some data when merging dataframes'
    all_photos = df['photo_path'].apply(io.imread).apply(transform)
    all_photos = all_photos.values
    all_attrs = df.drop(['photo_path', 'person', 'imagenum'], axis=1)
    return (all_photos, all_attrs)

batch_size = 32
train_photos, val_photos, train_attrs, val_attrs = train_test_split(
    data, attrs, train_size=0.8, shuffle=False)
data_tr = torch.utils.data.DataLoader(train_photos, batch_size=batch_size)
data_val = torch.utils.data.DataLoader(val_photos, batch_size=batch_size)

for i in range(12):
    plt.axis('off')

dim_code = 32

class CVAE(nn.Module):
    def __init__(self, base_channel_size: int, latent_dim: int, num_classes: int,
                 num_input_channels: int = 3, act_fn=nn.ReLU):
        super().__init__()
        self.dummy_param = nn.Parameter(torch.empty(0))
        self.latent_dim = latent_dim
        self.c_hid = base_channel_size
        self.num_classes = num_classes
        conv_size = int(np.exp2(np.log2(self.c_hid) - 3))
        ln_size = 2 * self.c_hid * conv_size * conv_size
        self.encoder = nn.Sequential(
            nn.Conv2d(num_input_channels, self.c_hid, kernel_size=3, padding=1, stride=2), act_fn(),
            nn.Conv2d(self.c_hid, self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.Conv2d(self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1, stride=2), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1, stride=2), act_fn())
        self.flatten = nn.Flatten(start_dim=1)
        self.linear_mu = nn.Sequential(nn.Linear(ln_size, latent_dim))
        self.linear_logvar = nn.Sequential(nn.Linear(ln_size, latent_dim))
        self.linear_decoder = nn.Sequential(nn.Linear(latent_dim + num_classes, ln_size), act_fn())
        self.unflatten = nn.Sequential(
            nn.Unflatten(dim=1, unflattened_size=(2 * self.c_hid, conv_size, conv_size)))
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3,
                               output_padding=1, padding=1, stride=2), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.ConvTranspose2d(2 * self.c_hid, self.c_hid, kernel_size=3,
                               output_padding=1, padding=1, stride=2), act_fn(),
            nn.Conv2d(self.c_hid, self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.ConvTranspose2d(self.c_hid, num_input_channels, kernel_size=3,
                               output_padding=1, padding=1, stride=2),
            nn.Sigmoid())

    def encode(self, x):
        x = self.encoder(x)
        x = self.flatten(x)
        mu = self.linear_mu(x)
        logvar = self.linear_logvar(x)
        return (mu, logvar)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(logvar / 2)
            eps = torch.randn_like(std)
            return eps * std + mu
        else:
            return mu

    def decode(self, x):
        x = self.linear_decoder(x)
        x = self.unflatten(x)
        x = self.decoder(x)
        return x

    def forward(self, x, **kwargs):
        y = kwargs['labels']
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes)
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        z = torch.cat([z, y], dim=1)
        z = self.decode(z)
        return (mu, logvar, z)

    def sample(self, labels: list):
        y = torch.tensor(labels, dtype=torch.int64).to(self.dummy_param.device)
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes)
        z = torch.randn(y.size()[0], 32).to(self.dummy_param.device)
        z = torch.cat([z, y], dim=1)
        return self.decode(z)

def KL_divergence(mu, logvar):
    """The part of the loss function responsible for the "closeness" of the
    latent representations of different people."""
    loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return loss

def log_likelihood(x, reconstruction):
    """The part of the loss function responsible for reconstruction quality
    (like the MSE term in a plain autoencoder)."""
    loss = nn.BCELoss(reduction='sum')
    return loss(reconstruction, x)

def loss_vae(x, mu, logsigma, reconstruction):
    kl = KL_divergence(mu, logsigma)
    ll = log_likelihood(x, reconstruction)
    return kl + ll

batch_size = 32
size = 32
transform = transforms.Compose([transforms.Resize(size), transforms.ToTensor()])
train_dataset = datasets.MNIST(root='./mnist_data/', transform=transform, train=True, download=True)
test_dataset = datasets.MNIST(root='./mnist_data/', transform=transform, train=False, download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

criterion = loss_vae
autoencoder = CVAE(num_input_channels=1, base_channel_size=32,
                   num_classes=train_dataset.targets.unique().size()[0], latent_dim=dim_code)
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.001)

def train(model, opt, loss_fn, epochs, data_tr, data_val, scheduler=None,
          device='cpu', show=True, show_num=3):
    from time import time
    from tqdm.autonotebook import tqdm
    model = model.to(device)
    X_val, Y_val = next(iter(data_val))
    train_losses = []
    val_losses = []
    log_template = 'Epoch {ep:03d}/{epochs:03d} train loss: {t_loss:0.4f} val loss {v_loss:0.4f}'
    with tqdm(desc='epoch', total=epochs) as pbar_outer:
        for epoch in range(epochs):
            tic = time()
            avg_loss = 0
            model.train()
            for X_batch, Y_batch in data_tr:
                X_batch = X_batch.to(device, dtype=torch.float32)
                Y_batch = Y_batch.to(device)
                opt.zero_grad()
                mu, logvar, X_pred = model(X_batch, labels=Y_batch)
                loss = loss_fn(X_batch, mu, logvar, X_pred)
                loss.backward()
                opt.step()
                avg_loss += loss / len(data_tr)
            toc = time()
            model.eval()
            mu, logvar, X_hat = model(X_val.to(device, dtype=torch.float32), labels=Y_val.to(device))
            X_hat = X_hat.detach().to('cpu')
            train_losses.append(avg_loss.item())
            val_losses.append(loss_fn(X_val, mu, logvar, X_hat).item())
            nums = np.random.randint(10, size=show_num)
            output_nums = model.sample(nums).detach()
            output_nums = output_nums.detach().to('cpu')
            if scheduler:
                scheduler.step()
            pbar_outer.update(1)
            if show:
                clear_output(wait=True)
                plt.clf()
                for k in range(show_num):
                    plt.axis('off')
                    plt.axis('off')
                    plt.axis('off')
            else:
                tqdm.write(log_template.format(ep=epoch + 1, epochs=epochs,
                                               t_loss=train_losses[-1], v_loss=val_losses[-1]))
    return (train_losses, val_losses, X_hat, mu, logvar)

from torch.optim import lr_scheduler
lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
max_epochs = 20
cvae_train_loss, cvae_val_loss, cvae_predict_img_val, cvae_mu, cvae_logvar = train(
    model=autoencoder, opt=optimizer, loss_fn=criterion, epochs=max_epochs,
    data_tr=train_loader, data_val=test_loader, device=device,
    scheduler=lr_scheduler, show=True)
code
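For reference, KL_divergence in the cell above is the standard closed-form KL term of a VAE between the encoder's diagonal Gaussian N(mu, sigma^2) and the unit Gaussian prior, KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) with logvar = log(sigma^2), while log_likelihood supplies the reconstruction term (summed BCE in place of MSE).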
129040633/cell_12
[ "text_plain_output_1.png" ]
from skimage import io
from sklearn.model_selection import train_test_split
from torchvision import datasets, transforms
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def fetch_dataset(path, attrs_name='lfw_attributes.txt', images_name='lfw-deepfunneled',
                  dx=80, dy=80, dimx=64, dimy=64):
    if not os.path.exists(images_name):
        os.system('wget http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz -O tmp.tgz')
        os.system('tar xvzf tmp.tgz && rm tmp.tgz')
    assert os.path.exists(images_name)
    if not os.path.exists(attrs_name):
        os.system('wget http://www.cs.columbia.edu/CAVE/databases/pubfig/download/%s' % attrs_name)
    transform = transforms.Compose([transforms.ToPILImage(),
                                    transforms.CenterCrop((dx, dy)),
                                    transforms.Resize((dimx, dimy)),
                                    transforms.ToTensor()])
    df_attrs = pd.read_csv(os.path.join(path, attrs_name), sep='\t', skiprows=1)
    df_attrs = pd.DataFrame(df_attrs.iloc[:, :-1].values, columns=df_attrs.columns[1:])
    photo_ids = []
    for dirpath, dirnames, filenames in os.walk(os.path.join(path, images_name)):
        for fname in filenames:
            if fname.endswith('.jpg'):
                fpath = os.path.join(dirpath, fname)
                photo_id = fname[:-4].replace('_', ' ').split()
                person_id = ' '.join(photo_id[:-1])
                photo_number = int(photo_id[-1])
                photo_ids.append({'person': person_id, 'imagenum': photo_number,
                                  'photo_path': fpath})
    photo_ids = pd.DataFrame(photo_ids)
    df = pd.merge(df_attrs, photo_ids, on=('person', 'imagenum'))
    assert len(df) == len(df_attrs), 'lost some data when merging dataframes'
    all_photos = df['photo_path'].apply(io.imread).apply(transform)
    all_photos = all_photos.values
    all_attrs = df.drop(['photo_path', 'person', 'imagenum'], axis=1)
    return (all_photos, all_attrs)

batch_size = 32
train_photos, val_photos, train_attrs, val_attrs = train_test_split(
    data, attrs, train_size=0.8, shuffle=False)
data_tr = torch.utils.data.DataLoader(train_photos, batch_size=batch_size)
data_val = torch.utils.data.DataLoader(val_photos, batch_size=batch_size)

class CVAE(nn.Module):
    def __init__(self, base_channel_size: int, latent_dim: int, num_classes: int,
                 num_input_channels: int = 3, act_fn=nn.ReLU):
        super().__init__()
        self.dummy_param = nn.Parameter(torch.empty(0))
        self.latent_dim = latent_dim
        self.c_hid = base_channel_size
        self.num_classes = num_classes
        conv_size = int(np.exp2(np.log2(self.c_hid) - 3))
        ln_size = 2 * self.c_hid * conv_size * conv_size
        self.encoder = nn.Sequential(
            nn.Conv2d(num_input_channels, self.c_hid, kernel_size=3, padding=1, stride=2), act_fn(),
            nn.Conv2d(self.c_hid, self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.Conv2d(self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1, stride=2), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1, stride=2), act_fn())
        self.flatten = nn.Flatten(start_dim=1)
        self.linear_mu = nn.Sequential(nn.Linear(ln_size, latent_dim))
        self.linear_logvar = nn.Sequential(nn.Linear(ln_size, latent_dim))
        self.linear_decoder = nn.Sequential(nn.Linear(latent_dim + num_classes, ln_size), act_fn())
        self.unflatten = nn.Sequential(
            nn.Unflatten(dim=1, unflattened_size=(2 * self.c_hid, conv_size, conv_size)))
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3,
                               output_padding=1, padding=1, stride=2), act_fn(),
            nn.Conv2d(2 * self.c_hid, 2 * self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.ConvTranspose2d(2 * self.c_hid, self.c_hid, kernel_size=3,
                               output_padding=1, padding=1, stride=2), act_fn(),
            nn.Conv2d(self.c_hid, self.c_hid, kernel_size=3, padding=1), act_fn(),
            nn.ConvTranspose2d(self.c_hid, num_input_channels, kernel_size=3,
                               output_padding=1, padding=1, stride=2),
            nn.Sigmoid())

    def encode(self, x):
        x = self.encoder(x)
        x = self.flatten(x)
        mu = self.linear_mu(x)
        logvar = self.linear_logvar(x)
        return (mu, logvar)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(logvar / 2)
            eps = torch.randn_like(std)
            return eps * std + mu
        else:
            return mu

    def decode(self, x):
        x = self.linear_decoder(x)
        x = self.unflatten(x)
        x = self.decoder(x)
        return x

    def forward(self, x, **kwargs):
        y = kwargs['labels']
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes)
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        z = torch.cat([z, y], dim=1)
        z = self.decode(z)
        return (mu, logvar, z)

    def sample(self, labels: list):
        y = torch.tensor(labels, dtype=torch.int64).to(self.dummy_param.device)
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes)
        z = torch.randn(y.size()[0], 32).to(self.dummy_param.device)
        z = torch.cat([z, y], dim=1)
        return self.decode(z)

def KL_divergence(mu, logvar):
    """The part of the loss function responsible for the "closeness" of the
    latent representations of different people."""
    loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return loss

def log_likelihood(x, reconstruction):
    """The part of the loss function responsible for reconstruction quality
    (like the MSE term in a plain autoencoder)."""
    loss = nn.BCELoss(reduction='sum')
    return loss(reconstruction, x)

def loss_vae(x, mu, logsigma, reconstruction):
    kl = KL_divergence(mu, logsigma)
    ll = log_likelihood(x, reconstruction)
    return kl + ll

batch_size = 32
size = 32
transform = transforms.Compose([transforms.Resize(size), transforms.ToTensor()])
train_dataset = datasets.MNIST(root='./mnist_data/', transform=transform, train=True, download=True)
test_dataset = datasets.MNIST(root='./mnist_data/', transform=transform, train=False, download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
code
122248863/cell_4
[ "text_plain_output_1.png" ]
import random

list = []
for i in range(5):
    list.append(random.randint(1, 10))
list.sort()

list = []
for i in range(1, 11, 2):
    list.append(i)

buah = ['Anggur', 'Jambu', 'Apel', 'Pisang', 'Semangka']
print('List with Slicing = ', buah[2:5])
print('Length of this list =', len(list))
code
122248863/cell_2
[ "text_plain_output_1.png" ]
import random

list = []
for i in range(5):
    list.append(random.randint(1, 10))
print('Example of a random list:', list)
list.sort()
print('Then sorted:', list)
code
122248863/cell_3
[ "text_plain_output_1.png" ]
import random

list = []
for i in range(5):
    list.append(random.randint(1, 10))
list.sort()

list = []
for i in range(1, 11, 2):
    list.append(i)
print('Example of a list with odd numbers: \n', list)
code
122248863/cell_5
[ "text_plain_output_1.png" ]
mytupple = ((1, 2, 3, 4, 5, 6), ('A', 'N', 'G', 'G', 'U', 'R'))
for i in mytupple:
    for j in i:
        print(j)
code
16129261/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)

clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())
code
16129261/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()

print('Question 2', '\n', '\n')
print('ZIP code with highest default rate:', default_by_zip.idxmax())
code
16129261/cell_20
[ "text_plain_output_1.png" ]
print('The criterion of demographic parity allows us to examine whether the fraction of applicants getting loans is the same across groups.')
print('As the above data shows, the model estimates substantially higher default rates for minority applicants (4.6%) compared to non-minority applicants (0.1%).')
print('We also observe a discrepancy between female (2.8%) and male applicants (1.9%), though to a lesser degree.')
print('Differences in the “positive rate” across groups indicate that the loan granting scheme is not making loans to each group at the same rate.')
print('This means that the criterion of demographic parity has not been achieved.')
code
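The demographic-parity discussion above reduces to comparing the predicted positive rate across groups. A minimal sketch of that check (assuming the `test` frame with 0/1 `minority` and `sex` columns and the `out_sample_pred` predictions from the neighbouring cells):

# Demographic parity check: compare predicted positive rates across groups.
for group_col in ['minority', 'sex']:
    print(f'Predicted positive rate by {group_col}:')
    print(test.groupby(group_col)['out_sample_pred'].mean() * 100)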
16129261/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

print('Question 4', '\n', '\n')
print('Correlation between age and income:', train['income'].corr(train['age']) * 100, '%')
code
16129261/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

print('Question 6', '\n', '\n')
print('Out of bag score:', clf.oob_score_ * 100, '%')
code
16129261/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

y_test = test[['default']]
x_test = test[['rent', 'education', 'income', 'loan_size', 'payment_timing',
               'job_stability', 'ZIP', 'occupation']]
x_test = pd.get_dummies(data=x_test)
out_sample_pred = clf.predict(x_test)
test['out_sample_pred'] = out_sample_pred
minority_default = test.out_sample_pred.groupby(test.minority).mean()

female = test[test.sex == 1]
male = test[test.sex == 0]
minority = test[test.minority == 1]
non_minority = test[test.minority == 0]

print('Question 11', '\n', '\n')
print('Percentage of accepted and rejected - Minority applicants', '\n', '\n',
      minority.out_sample_pred.value_counts(True) * 100, '\n')
print('Percentage of accepted and rejected - Non-minority applicants', '\n', '\n',
      non_minority.out_sample_pred.value_counts(True) * 100, '\n')
print('Percentage of accepted and rejected - Female applicants', '\n', '\n',
      female.out_sample_pred.value_counts(True) * 100, '\n')
print('Percentage of accepted and rejected - Male applicants', '\n', '\n',
      male.out_sample_pred.value_counts(True) * 100, '\n')
code
16129261/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import sklearn as sklearn
import sklearn.model_selection as sklearn_model_selection
import sklearn.ensemble as sklearn_ensemble

print(os.listdir('../input'))
code
16129261/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

y_test = test[['default']]
x_test = test[['rent', 'education', 'income', 'loan_size', 'payment_timing',
               'job_stability', 'ZIP', 'occupation']]
x_test = pd.get_dummies(data=x_test)
out_sample_pred = clf.predict(x_test)
test['out_sample_pred'] = out_sample_pred
minority_default = test.out_sample_pred.groupby(test.minority).mean()

print('Question 8', '\n', '\n')
print('Default rate for non-minorities:', minority_default[0] * 100, '%')
code
16129261/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

y_test = test[['default']]
x_test = test[['rent', 'education', 'income', 'loan_size', 'payment_timing',
               'job_stability', 'ZIP', 'occupation']]
x_test = pd.get_dummies(data=x_test)
out_sample_pred = clf.predict(x_test)
test['out_sample_pred'] = out_sample_pred
minority_default = test.out_sample_pred.groupby(test.minority).mean()

print('Question 9', '\n', '\n')
print('Default rate for minorities:', minority_default[1] * 100, '%')
code
16129261/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
print('Question 1:', '\n', '\n', 'Percentage of training set loans in default:',
      loans_in_default[1] * 100, '%')
code
16129261/cell_17
[ "text_plain_output_1.png" ]
print('Question 10', '\n', '\n')
print('The loan granting scheme is group unaware. The model calculates the default probability of each applicant and then applies the same cut-off (50%) to all groups.')
code
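A "group unaware" rule in the sense described above applies one probability cut-off to every applicant. A minimal sketch (assuming the fitted `clf` and the dummy-encoded `x_test` from the surrounding cells; for a binary classifier, `clf.predict` is equivalent to this 0.5 threshold):

# One 50% cut-off on the estimated default probability for all applicants;
# no group-specific thresholds are involved.
probs = clf.predict_proba(x_test)[:, 1]
grant_loan = probs < 0.5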
16129261/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

y_test = test[['default']]
x_test = test[['rent', 'education', 'income', 'loan_size', 'payment_timing',
               'job_stability', 'ZIP', 'occupation']]
x_test = pd.get_dummies(data=x_test)
out_sample_pred = clf.predict(x_test)

print('Question 7', '\n', '\n')
print('Out-of-sample accuracy:', sklearn.metrics.accuracy_score(out_sample_pred, y_test) * 100, '%')
code
16129261/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn as sklearn  # machine learning

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

y_train = train['default']
x_train = train[['rent', 'education', 'income', 'loan_size', 'payment_timing',
                 'job_stability', 'ZIP', 'occupation']]
x_train = pd.get_dummies(x_train)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=4,
                                              random_state=42, oob_score=True, n_jobs=-1)
clf.fit(x_train, y_train.values.ravel())

print('Question 5', '\n', '\n')
print('In-sample accuracy:', sklearn.metrics.accuracy_score(clf.predict(x_train), y_train) * 100, '%')
code
16129261/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv', low_memory=False)
train = pd.read_csv('../input/train.csv', low_memory=False)
loans_in_default = train.default.value_counts(True)
default_by_zip = train.default.groupby(train.ZIP).mean()
default_by_year = train.default.groupby(train.year).mean()

print('Question 3', '\n', '\n')
print('Default rate in the first year for which we have data:', default_by_year[0] * 100, '%')
code
90105070/cell_4
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')

train.time = pd.to_datetime(train.time)
train['daytime_id'] = ((train.time.dt.hour * 60 + train.time.dt.minute) / 20).astype(int)
train = train.set_index('row_id', drop=True)
train['roadway'] = train.x.astype('str') + '_' + train.y.astype('str') + '_' + train.direction.astype('str')
train['day_of_week'] = train.time.dt.dayofweek

test.time = pd.to_datetime(test.time)
test['roadway'] = test.x.astype('str') + '_' + test.y.astype('str') + '_' + test.direction.astype('str')
test['day_of_week'] = test.time.dt.dayofweek

plot = sns.histplot(train['congestion'])
plot.set_title('Congestion Histogram')
plt.show()
code
90105070/cell_6
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')

train.time = pd.to_datetime(train.time)
train['daytime_id'] = ((train.time.dt.hour * 60 + train.time.dt.minute) / 20).astype(int)
train = train.set_index('row_id', drop=True)
train['roadway'] = train.x.astype('str') + '_' + train.y.astype('str') + '_' + train.direction.astype('str')
train['day_of_week'] = train.time.dt.dayofweek

test.time = pd.to_datetime(test.time)
test['roadway'] = test.x.astype('str') + '_' + test.y.astype('str') + '_' + test.direction.astype('str')
test['day_of_week'] = test.time.dt.dayofweek

# Histogram of all congestions
plot = sns.histplot(train['congestion'])
plot.set_title('Congestion Histogram')
plt.show()

# Per-roadway congestion histograms, five panels per figure
rw = train.roadway.unique()
i = 0
while i < len(rw):
    fig, axs = plt.subplots(1, 5, figsize=(10, 3))
    for j in range(5):
        sns.histplot(data=train, x=train.congestion[train.roadway == rw[i + j]],
                     kde=True, color='skyblue', ax=axs[j])
        axs[j].set_title(f'{rw[i + j]}')
    i += 5
    plt.tight_layout()
    plt.show()
code
90105070/cell_8
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')

train.time = pd.to_datetime(train.time)
train['daytime_id'] = ((train.time.dt.hour * 60 + train.time.dt.minute) / 20).astype(int)
train = train.set_index('row_id', drop=True)
train['roadway'] = train.x.astype('str') + '_' + train.y.astype('str') + '_' + train.direction.astype('str')
train['day_of_week'] = train.time.dt.dayofweek

test.time = pd.to_datetime(test.time)
test['roadway'] = test.x.astype('str') + '_' + test.y.astype('str') + '_' + test.direction.astype('str')
test['day_of_week'] = test.time.dt.dayofweek

# Histogram of all congestions
plot = sns.histplot(train['congestion'])
plot.set_title('Congestion Histogram')
plt.show()

# Per-roadway congestion histograms, five panels per figure
rw = train.roadway.unique()
i = 0
while i < len(rw):
    fig, axs = plt.subplots(1, 5, figsize=(10, 3))
    for j in range(5):
        sns.histplot(data=train, x=train.congestion[train.roadway == rw[i + j]],
                     kde=True, color='skyblue', ax=axs[j])
        axs[j].set_title(f'{rw[i + j]}')
    i += 5
    plt.tight_layout()
    plt.show()

# Per-daytime-slot congestion histograms, four panels per figure
dt = train.daytime_id.unique()
i = 0
while i < len(dt):
    fig, axs = plt.subplots(1, 4, figsize=(10, 3))
    for j in range(4):
        sns.histplot(data=train, x=train.congestion[train.daytime_id == dt[i + j]],
                     kde=True, color='skyblue', ax=axs[j])
        axs[j].set_title(f'{dt[i + j]}')
    i += 4
    plt.tight_layout()
    plt.show()
code
90105070/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')
train.time = pd.to_datetime(train.time)
train['daytime_id'] = ((train.time.dt.hour * 60 + train.time.dt.minute) / 20).astype(int)
train = train.set_index('row_id', drop=True)
train['roadway'] = train.x.astype('str') + '_' + train.y.astype('str') + '_' + train.direction.astype('str')
train['day_of_week'] = train.time.dt.dayofweek
test.time = pd.to_datetime(test.time)
test['roadway'] = test.x.astype('str') + '_' + test.y.astype('str') + '_' + test.direction.astype('str')
test['day_of_week'] = test.time.dt.dayofweek
# Histogram of all congestions
plot = sns.histplot(train['congestion'])
plot.set_title('Congestion Histogram')
plt.show()
rw = train.roadway.unique()
i=0
while i < len(rw):
    fig, axs = plt.subplots(1,5,figsize=(10, 3))
    sns.histplot(data=train,x=train.congestion[train.roadway==rw[i]], kde=True, color="skyblue",ax=axs[0])
    axs[0].set_title(f'{rw[i]}')
    sns.histplot(data=train,x=train.congestion[train.roadway==rw[i+1]], kde=True, color="skyblue",ax=axs[1])
    axs[1].set_title(f'{rw[i+1]}')
    sns.histplot(data=train,x=train.congestion[train.roadway==rw[i+2]], kde=True, color="skyblue",ax=axs[2])
    axs[2].set_title(f'{rw[i+2]}')
    sns.histplot(data=train,x=train.congestion[train.roadway==rw[i+3]], kde=True, color="skyblue",ax=axs[3])
    axs[3].set_title(f'{rw[i+3]}')
    sns.histplot(data=train,x=train.congestion[train.roadway==rw[i+4]], kde=True, color="skyblue",ax=axs[4])
    axs[4].set_title(f'{rw[i+4]}')
    i+=5
    plt.tight_layout()
    plt.show()
dt = train.daytime_id.unique()
i=0
while i < len(dt):
    fig, axs = plt.subplots(1,4,figsize=(10, 3))
    sns.histplot(data=train,x=train.congestion[train.daytime_id==dt[i]], kde=True, color="skyblue",ax=axs[0])
    axs[0].set_title(f'{dt[i]}')
    sns.histplot(data=train,x=train.congestion[train.daytime_id==dt[i+1]], kde=True, color="skyblue",ax=axs[1])
    axs[1].set_title(f'{dt[i+1]}')
    sns.histplot(data=train,x=train.congestion[train.daytime_id==dt[i+2]], kde=True, color="skyblue",ax=axs[2])
    axs[2].set_title(f'{dt[i+2]}')
    sns.histplot(data=train,x=train.congestion[train.daytime_id==dt[i+3]], kde=True, color="skyblue",ax=axs[3])
    axs[3].set_title(f'{dt[i+3]}')
    i+=4
    plt.tight_layout()
    plt.show()
dow = train.time.dt.dayofweek.unique()
i = 0
while i < len(dow):
    sns.histplot(data=train, x=train.congestion[train.time.dt.dayofweek == dow[i]], kde=True, color='skyblue').set(title=f'{dow[i]}')
    i += 1
plt.show()
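# Hedged sketch (editor's addition): the day-of-week loop above layers all seven
# histograms onto one shared Axes; a faceted figure keeps them apart. Assumes the
# installed seaborn is >= 0.11, where displot is available.
g = sns.displot(data=train, x='congestion', col='day_of_week', col_wrap=4, kde=True, color='skyblue', height=3)
plt.show()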
code
2029345/cell_9
[ "image_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
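# Hedged sketch (editor's addition): pd.get_dummies(predicators) above encodes the
# *list of column names*, not the data, and fit(X, y) would typically choke on the
# raw string columns in X. One plausible reading of the intent, with hypothetical
# names (X_encoded, encoded_model) introduced only for illustration:
X_encoded = pd.get_dummies(data_train[predicators])
encoded_model = DecisionTreeRegressor()
encoded_model.fit(X_encoded, y)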
code
2029345/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomForestRegressor import pandas as pd import pandas as pd import pandas as pd import pandas as pd main_file_path = '../input/train.csv' data_train = pd.read_csv(main_file_path) predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition'] one_hot_encoded_training_predictors = pd.get_dummies(predicators) one_hot_encoded_training_predictors import numpy as np import pandas as pd from sklearn.ensemble import RandomForestRegressor train = pd.read_csv('../input/train.csv') train_y = train.SalePrice predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd'] train_X = train[predictor_cols] my_model = RandomForestRegressor() my_model.fit(train_X, train_y) test = pd.read_csv('../input/test.csv') test_X = test[predictor_cols] predict_prices = my_model.predict(test_X) my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices}) my_submission.to_csv('submission.csv', index=False) import pandas as pd train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') train_data.dropna(axis=0, subset=['SalePrice'], inplace=True) target = train_data.SalePrice cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()] candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1) candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1) low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object'] numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']] my_cols = low_cardinality_cols + numeric_cols train_predictors = candidate_train_predictors[my_cols] test_predictors = candidate_test_predictors[my_cols] train_predictors.dtypes.sample(10)
code
2029345/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
col_interest = ['ScreenPorch', 'MoSold', 'LotShape', 'SaleType', 'SaleCondition']
sa = data_train[col_interest]
sa.describe()
code
2029345/cell_34
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_test_predictors = pd.get_dummies(test_predictors)
final_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors, join='left', axis=1)
final_train
final_test
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data_train = pd.read_csv('../input/train.csv')
data_train.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data_train.SalePrice
X = data_train.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_Y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
data_train.columns
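# Hedged sketch (editor's addition, assuming scikit-learn >= 0.22 and pandas >= 0.24,
# newer than the stack this notebook ran on): `Imputer` was removed from sklearn and
# `.as_matrix()` from pandas; the modern equivalents are SimpleImputer and .to_numpy().
# The *2-suffixed names are hypothetical, used only to avoid clobbering the originals.
from sklearn.impute import SimpleImputer
train_X2, test_X2, train_Y2, test_y2 = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.25)
simple_imputer = SimpleImputer()  # mean imputation, matching the old Imputer default
train_X2 = simple_imputer.fit_transform(train_X2)
test_X2 = simple_imputer.transform(test_X2)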
code
2029345/cell_33
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_test_predictors = pd.get_dummies(test_predictors)
final_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors, join='left', axis=1)
final_train
final_test
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data_train = pd.read_csv('../input/train.csv')
data_train.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data_train.SalePrice
X = data_train.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_Y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_X, train_Y, verbose=False)
predictions = my_model.predict(test_X)
from sklearn.metrics import mean_absolute_error
my_model = XGBRegressor(n_estimators=1000, learning_rate=0.5)
my_model.fit(train_X, train_Y, early_stopping_rounds=5, eval_set=[(test_X, test_y)], verbose=False)
code
2029345/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
print(predict_prices)
code
2029345/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
code
2029345/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
code
2029345/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
print(data_train.columns)
code
2029345/cell_11
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_Home_prices = housing_model.predict(X)
mean_absolute_error(y, predicted_Home_prices)
code
2029345/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
code
2029345/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
code
2029345/cell_32
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_test_predictors = pd.get_dummies(test_predictors)
final_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors, join='left', axis=1)
final_train
final_test
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data_train = pd.read_csv('../input/train.csv')
data_train.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data_train.SalePrice
X = data_train.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_Y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_X, train_Y, verbose=False)
predictions = my_model.predict(test_X)
from sklearn.metrics import mean_absolute_error
print('Mean absolute error :' + str(mean_absolute_error(predictions, test_y)))
code
2029345/cell_28
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_test_predictors = pd.get_dummies(test_predictors)
final_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors, join='left', axis=1)
final_train
final_test
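# Hedged sketch (editor's addition): why align(join='left', axis=1) matters here —
# dummy columns that appear in train but not test (or vice versa) would otherwise
# misalign the feature matrices. A tiny illustration with hypothetical demo frames:
demo_train = pd.get_dummies(pd.DataFrame({'color': ['red', 'blue']}))
demo_test = pd.get_dummies(pd.DataFrame({'color': ['red', 'green']}))
aligned_train, aligned_test = demo_train.align(demo_test, join='left', axis=1)
# aligned_test now carries exactly train's columns (color_blue filled with NaN,
# color_red kept); the test-only color_green is dropped by the left join.
print(aligned_test.columns.tolist())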
code
2029345/cell_15
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

for max_leaf_nodes in [5, 50, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
    print('Max leaf nodes :%d \t\t Mean Absolute Error: %d' % (max_leaf_nodes, my_mae))
code
2029345/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
data_train
code
2029345/cell_17
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
predict_vals = forest_model.predict(val_X)
print(mean_absolute_error(val_y, predict_vals))
code
2029345/cell_31
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_test_predictors = pd.get_dummies(test_predictors)
final_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors, join='left', axis=1)
final_train
final_test
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data_train = pd.read_csv('../input/train.csv')
data_train.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data_train.SalePrice
X = data_train.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_Y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_X, train_Y, verbose=False)
code
2029345/cell_10
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
print(' making predictions for the following 5 houses:')
print(X.head())
print('The prediction are')
print(housing_model.predict(X.head()))
code
2029345/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
my_submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predict_prices})
my_submission.to_csv('submission.csv', index=False)
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
target = train_data.SalePrice
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
candidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
candidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)
low_cardinality_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].nunique() < 10 and candidate_train_predictors[cname].dtype == 'object']
numeric_cols = [cname for cname in candidate_train_predictors.columns if candidate_train_predictors[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = candidate_train_predictors[my_cols]
test_predictors = candidate_test_predictors[my_cols]
train_predictors.dtypes.sample(10)
one_hot_encoded_training_predictors = pd.get_dummies(train_predictors)
one_hot_encoded_training_predictors
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

def get_mae(X, y):
    return -1 * cross_val_score(RandomForestRegressor(50), X, y, scoring='neg_mean_absolute_error').mean()

predictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
print('Mean Absolute Error when Dropping Categoricals:' + str(int(mae_without_categoricals)))
print('Mean Absolute Error with One-Hot Encoding:' + str(int(mae_one_hot_encoded)))
code
2029345/cell_12
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data_train = pd.read_csv(main_file_path)
y = data_train.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF', 'LotShape', 'SaleType', 'SaleCondition']
one_hot_encoded_training_predictors = pd.get_dummies(predicators)
one_hot_encoded_training_predictors
X = data_train[predicators]
X
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_Home_prices = housing_model.predict(X)
mean_absolute_error(y, predicted_Home_prices)
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
housing_model = DecisionTreeRegressor()
housing_model.fit(train_X, train_y)
val_predictions = housing_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))
code
73067465/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig, ax = plt.subplots(3, 1, figsize=(15, 13))
sns.heatmap(train.corr('spearman'), annot=True, ax=ax[0], label='spearman')  # spearman
sns.heatmap(train.corr('kendall'), annot=True, ax=ax[1], label='kendall')  # Kendall
sns.heatmap(train.corr('pearson'), annot=True, ax=ax[2], label='pearson')  # pearson
sns.catplot(x='Embarked', data=train, kind='count', hue='Survived', col='Sex')
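# Hedged note (editor's addition, assuming pandas >= 1.5, newer than this notebook's
# stack): at the time this ran, DataFrame.corr() silently dropped the remaining
# object columns (Sex, Embarked); recent pandas requires opting in explicitly:
spearman_numeric = train.corr(method='spearman', numeric_only=True)
print(spearman_numeric.shape)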
code
73067465/cell_33
[ "text_plain_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,VotingClassifier
from sklearn.impute import KNNImputer,IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler,RobustScaler,StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
train = pd.get_dummies(train, columns=['Pclass', 'Embarked', 'title', 'family'], drop_first=True)
impute = KNNImputer(n_neighbors=13)
train = pd.DataFrame(impute.fit_transform(train), columns=train.columns)
model = []
model.append(('Logistic Regression', LogisticRegression(max_iter=1000)))
model.append(('LDA', LinearDiscriminantAnalysis()))
model.append(('SVC', SVC(kernel='rbf')))
model.append(('DTC', DecisionTreeClassifier()))
model.append(('GBC', GradientBoostingClassifier()))
model.append(('RFC', RandomForestClassifier()))
model.append(('Kneig', KNeighborsClassifier()))
x = train.drop('Survived', axis=1)
y = train['Survived']
xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, test_size=0.3)
scores = []
for name, models in model:
    pipeline = Pipeline(steps=[('scale', MinMaxScaler()), ('model', models)])
    cv = StratifiedKFold(n_splits=10, random_state=21, shuffle=True)
    score = cross_val_score(pipeline, x, y, cv=cv, scoring='accuracy', n_jobs=-1)
    scores.append((name, np.mean(score)))
scores
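# Hedged sketch (editor's addition): VotingClassifier is imported above but never
# used; combining a few of the same candidate models into a soft-voting ensemble
# might look roughly like this. SVC needs probability=True for soft voting, and
# the subset of voters chosen here is an assumption, not the author's design.
voters = [('lr', LogisticRegression(max_iter=1000)),
          ('svc', SVC(kernel='rbf', probability=True)),
          ('rfc', RandomForestClassifier())]
voting = Pipeline(steps=[('scale', MinMaxScaler()),
                         ('model', VotingClassifier(estimators=voters, voting='soft'))])
voting_score = cross_val_score(voting, x, y, cv=StratifiedKFold(n_splits=10, random_state=21, shuffle=True), scoring='accuracy', n_jobs=-1)
np.mean(voting_score)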
code
73067465/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
train.hist(figsize=(15, 10))
plt.show()
code
73067465/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
sns.countplot(x='family', data=train, hue='Survived')
code
73067465/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code