Dataset schema (one row per notebook cell):

  path              stringlengths    13 to 17
  screenshot_names  sequencelengths  1 to 873
  code              stringlengths    0 to 40.4k
  cell_type         stringclasses    1 value
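Each record below pairs a notebook cell identifier (path) with the names of its rendered output screenshots, the cell's source code, and its cell type (always 'code' here). A minimal sketch of reading such a table with pandas; the file name cells.parquet is an assumption for illustration, since this dump does not name the underlying file:

import pandas as pd

# Hypothetical file name -- the dump does not say how the table is stored.
df = pd.read_parquet('cells.parquet')

# Expected columns, per the schema above.
print(df.columns.tolist())         # ['path', 'screenshot_names', 'code', 'cell_type']
print(df['cell_type'].unique())    # ['code'] -- stringclasses: 1 value
print(df['code'].str.len().max())  # up to ~40.4k characters of source per cell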
128021494/cell_8
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf

data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)

# Labels: malignant ('M') -> 0, benign -> 1.
labels = np.zeros((data.shape[0], 1))
with open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r') as file:
    for count, line in enumerate(file.readlines()):
        labels[count] = 0 if line[0] == 'M' else 1

# Shuffle features and labels together. The pairs must hold copies: storing views
# into `data`/`labels` and then writing back into those same arrays would corrupt
# the rows that have not been copied back yet.
data_comb = [(data[i].copy(), labels[i].copy()) for i in range(569)]
random.shuffle(data_comb)
for i in range(569):
    data[i] = data_comb[i][0]
    labels[i] = data_comb[i][1]

# 75/25 train/test split; slicing from `border` (not `border + 1`) so no sample is dropped.
border = int(data.shape[0] * 0.75)
train_x, train_y = data[:border], labels[:border]
test_x, test_y = data[border:], labels[border:]

sc = StandardScaler()
train_x = sc.fit_transform(train_x)
test_x = sc.transform(test_x)

model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(30))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002),
              loss='binary_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.TruePositives(),
                       tf.keras.metrics.FalsePositives(),
                       tf.keras.metrics.TrueNegatives(),
                       tf.keras.metrics.FalseNegatives()])
model.summary()

batch_size = 64
history = model.fit(train_x, train_y, batch_size=batch_size, epochs=1)
dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).batch(batch_size)
history = model.fit(dataset, epochs=15)
val_dataset = tf.data.Dataset.from_tensor_slices((test_x, test_y)).batch(batch_size)
history = model.fit(dataset, epochs=1, validation_data=val_dataset)
print(history.history)
code
128021494/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)
print(data.shape)

# Labels: malignant ('M') -> 0, benign -> 1.
labels = np.zeros((data.shape[0], 1))
with open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r') as file:
    for count, line in enumerate(file.readlines()):
        labels[count] = 0 if line[0] == 'M' else 1
print(labels.shape)
plt.hist(labels)
code
50212911/cell_13
[ "image_output_1.png" ]
import xgboost

# X_train/Y_train and X_test/Y_test are presumably defined in an earlier cell not included in this dump.
xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, booster='gbtree')
xgBoost.fit(X_train, Y_train)
print('train score', xgBoost.score(X_train, Y_train))
print('test score', xgBoost.score(X_test, Y_test))
code
50212911/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')

def NanColums(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan

nanColums = NanColums(df)
plt.xticks(rotation=90)

# Drop columns that are mostly missing.
del df['Alley']
del df['PoolQC']
del df['MiscFeature']

# Fill categorical NaNs with '0' and numeric NaNs with 0.0.
for col in ['MasVnrType', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
            'BsmtFinType2', 'FireplaceQu', 'Electrical', 'GarageType',
            'GarageFinish', 'GarageQual', 'GarageCond', 'Fence']:
    df[col].fillna(value='0', inplace=True)
df['MasVnrArea'].fillna(value=0.0, inplace=True)
df['GarageYrBlt'].fillna(value=0.0, inplace=True)

# Impute LotFrontage with the neighborhood mean.
df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean()))
df = pd.get_dummies(df)

sns.heatmap(df.corr(), xticklabels=True, yticklabels=True)
plt.show()
code
50212911/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.info()
code
50212911/cell_20
[ "text_plain_output_1.png" ]
from sklearn import ensemble
from sklearn.model_selection import GridSearchCV  # used below but missing from the original imports
import catboost
import lightgbm
import xgboost

# X_train/Y_train and X_test/Y_test are presumably defined in an earlier cell not included in this dump.

# Baseline models.
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)

cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False)
cboost.fit(X_train, Y_train)

xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, booster='gbtree')
xgBoost.fit(X_train, Y_train)

lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100)
lgbreg.fit(X_train, Y_train)

# Grid searches followed by refits with the chosen settings.
params = {'learning_rate': [0.05], 'n_estimators': [200], 'max_depth': [6]}
gsc = GridSearchCV(estimator=ensemble.GradientBoostingRegressor(), param_grid=params, cv=3)
grid_result = gsc.fit(X_train, Y_train)
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=200, max_depth=6)
sklearn_boost.fit(X_train, Y_train)

params = {'depth': [7], 'learning_rate': [0.15], 'l2_leaf_reg': [15, 20, 25], 'iterations': [300], 'verbose': [False]}
gsc = GridSearchCV(estimator=catboost.CatBoostRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False, learning_rate=0.15, l2_leaf_reg=20, iterations=300)
cboost.fit(X_train, Y_train)

params = [{'learning_rate': [0.2], 'n_estimators': [250], 'max_depth': [3]}]
gsc = GridSearchCV(estimator=xgboost.XGBRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.2, n_estimators=250, booster='gbtree')
xgBoost.fit(X_train, Y_train)

# The original used pipeline-style keys ('regressor__regressor__...') and cv=320 with a bare
# LGBMRegressor, which GridSearchCV rejects; plain parameter names and cv=3 run correctly.
params = [{'boosting_type': ['gbdt'], 'n_estimators': [100], 'max_depth': [20],
           'learning_rate': [0.1], 'num_leaves': [31]}]
gsc = GridSearchCV(estimator=lightgbm.LGBMRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
print('Best params:', grid_result.best_params_)
print('Best score:', grid_result.best_score_)

lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100, max_depth=20)
lgbreg.fit(X_train, Y_train)
print('train score', lgbreg.score(X_train, Y_train))
print('test score', lgbreg.score(X_test, Y_test))
code
50212911/cell_11
[ "text_plain_output_1.png" ]
from sklearn import ensemble

# X_train/Y_train and X_test/Y_test are presumably defined in an earlier cell not included in this dump.
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)
print('train score', sklearn_boost.score(X_train, Y_train))
print('test score', sklearn_boost.score(X_test, Y_test))
code
50212911/cell_19
[ "text_plain_output_1.png" ]
from sklearn import ensemble
from sklearn.model_selection import GridSearchCV  # used below but missing from the original imports
import catboost
import xgboost

# Baseline models (X_train/Y_train from an earlier cell not included in this dump).
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)

cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False)
cboost.fit(X_train, Y_train)

xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, booster='gbtree')
xgBoost.fit(X_train, Y_train)

params = {'learning_rate': [0.05], 'n_estimators': [200], 'max_depth': [6]}
gsc = GridSearchCV(estimator=ensemble.GradientBoostingRegressor(), param_grid=params, cv=3)
grid_result = gsc.fit(X_train, Y_train)
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=200, max_depth=6)
sklearn_boost.fit(X_train, Y_train)

params = {'depth': [7], 'learning_rate': [0.15], 'l2_leaf_reg': [15, 20, 25], 'iterations': [300], 'verbose': [False]}
gsc = GridSearchCV(estimator=catboost.CatBoostRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False, learning_rate=0.15, l2_leaf_reg=20, iterations=300)
cboost.fit(X_train, Y_train)

# Grid search for XGBoost, then refit with the chosen settings.
params = [{'learning_rate': [0.2], 'n_estimators': [250], 'max_depth': [3]}]
gsc = GridSearchCV(estimator=xgboost.XGBRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
print('Best params:', grid_result.best_params_)
print('Best score:', grid_result.best_score_)

xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.2, n_estimators=250, booster='gbtree')
xgBoost.fit(X_train, Y_train)
print('train score', xgBoost.score(X_train, Y_train))
print('test score', xgBoost.score(X_test, Y_test))
code
50212911/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50212911/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')

def NanColums(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan

nanColums = NanColums(df)
plt.xticks(rotation=90)

# Drop columns that are mostly missing.
del df['Alley']
del df['PoolQC']
del df['MiscFeature']

# Fill categorical NaNs with '0' and numeric NaNs with 0.0.
for col in ['MasVnrType', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
            'BsmtFinType2', 'FireplaceQu', 'Electrical', 'GarageType',
            'GarageFinish', 'GarageQual', 'GarageCond', 'Fence']:
    df[col].fillna(value='0', inplace=True)
df['MasVnrArea'].fillna(value=0.0, inplace=True)
df['GarageYrBlt'].fillna(value=0.0, inplace=True)

# Impute LotFrontage with the neighborhood mean.
df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean()))
df = pd.get_dummies(df)

plt.figure(figsize=(10, 8))
sns.distplot(df['SalePrice'])
plt.show()
code
50212911/cell_18
[ "text_plain_output_1.png" ]
from sklearn import ensemble
from sklearn.model_selection import GridSearchCV  # used below but missing from the original imports
import catboost

# X_train/Y_train and X_test/Y_test are presumably defined in an earlier cell not included in this dump.
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)

cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False)
cboost.fit(X_train, Y_train)

params = {'learning_rate': [0.05], 'n_estimators': [200], 'max_depth': [6]}
gsc = GridSearchCV(estimator=ensemble.GradientBoostingRegressor(), param_grid=params, cv=3)
grid_result = gsc.fit(X_train, Y_train)
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=200, max_depth=6)
sklearn_boost.fit(X_train, Y_train)

# Grid search for CatBoost, then refit with the chosen settings.
params = {'depth': [7], 'learning_rate': [0.15], 'l2_leaf_reg': [15, 20, 25], 'iterations': [300], 'verbose': [False]}
gsc = GridSearchCV(estimator=catboost.CatBoostRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1)
grid_result = gsc.fit(X_train, Y_train)
print('Best params:', grid_result.best_params_)
print('Best score:', grid_result.best_score_)
cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False, learning_rate=0.15, l2_leaf_reg=20, iterations=300)
cboost.fit(X_train, Y_train)
print('train score', cboost.score(X_train, Y_train))
print('test score', cboost.score(X_test, Y_test))
code
50212911/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import RidgeCV
from sklearn.svm import LinearSVR
from sklearn.ensemble import RandomForestRegressor, StackingRegressor
import warnings

warnings.filterwarnings('ignore')

# Stack a ridge regression and a linear SVR, with a random forest as the final estimator.
estimators = [('lr', RidgeCV()), ('svr', LinearSVR(random_state=42, max_iter=1000))]
regStack = StackingRegressor(estimators=estimators,
                             final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
regStack.fit(X_train, Y_train)
print('train score', regStack.score(X_train, Y_train))
print('test score', regStack.score(X_test, Y_test))
code
50212911/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor

# Bagging via a random forest (the original variable name is kept).
Begging = RandomForestRegressor(max_depth=30, n_estimators=300)
Begging.fit(X_train, Y_train)
print('train score', Begging.score(X_train, Y_train))
print('test score', Begging.score(X_test, Y_test))
code
50212911/cell_17
[ "text_plain_output_1.png" ]
from sklearn import ensemble
from sklearn.model_selection import GridSearchCV  # used below but missing from the original imports

sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)

params = {'learning_rate': [0.05], 'n_estimators': [200], 'max_depth': [6]}
gsc = GridSearchCV(estimator=ensemble.GradientBoostingRegressor(), param_grid=params, cv=3)
grid_result = gsc.fit(X_train, Y_train)
print('Best params:', grid_result.best_params_)
print('Best score:', grid_result.best_score_)

sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=200, max_depth=6)
sklearn_boost.fit(X_train, Y_train)
print('train score', sklearn_boost.score(X_train, Y_train))
print('test score', sklearn_boost.score(X_test, Y_test))
code
50212911/cell_14
[ "image_output_1.png" ]
import lightgbm

lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100)
lgbreg.fit(X_train, Y_train)
print('train score', lgbreg.score(X_train, Y_train))
print('test score', lgbreg.score(X_test, Y_test))
code
50212911/cell_12
[ "image_output_1.png" ]
import catboost

cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False)
cboost.fit(X_train, Y_train)
print('train score', cboost.score(X_train, Y_train))
print('test score', cboost.score(X_test, Y_test))
code
50212911/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')

def NanColums(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan

nanColums = NanColums(df)
sns.barplot(x=nanColums.index, y=nanColums)
plt.xticks(rotation=90)
code
106198039/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train
df_train.describe(include='all')
code
106198039/cell_20
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]

df_Y = df_train_Y.copy()
df = df_train_X_good_cols.copy()
df_test = df_test_X_good_cols.copy()

# Impute: column mode for CryoSleep, column medians for the spending features.
df[['CryoSleep']] = df[['CryoSleep']].fillna(value=df[['CryoSleep']].mode().iloc[0, 0])
df_test[['CryoSleep']] = df_test[['CryoSleep']].fillna(value=df_test[['CryoSleep']].mode().iloc[0, 0])
num_cols = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num_cols] = df[num_cols].fillna(value=df[num_cols].median())
df_test[num_cols] = df_test[num_cols].fillna(value=df_test[num_cols].median())

df['CryoSleep'] = df['CryoSleep'].astype(int)
df_test['CryoSleep'] = df_test['CryoSleep'].astype(int)
df_Y['Transported'] = df_Y['Transported'].astype(int)

X_train, X_test, y_train, y_test = train_test_split(df.to_numpy(), df_Y.to_numpy(), test_size=0.1)

classifier = RandomForestClassifier(n_estimators=1000)
classifier.fit(X_train, y_train.ravel())
y_pred = classifier.predict(X_test)

# Mean-decrease-in-impurity importances, with the std across trees as error bars.
importances = classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in classifier.estimators_], axis=0)
forest_importances = pd.Series(importances, index=good_columns)
fig, ax = plt.subplots()
forest_importances.plot.bar(yerr=std, ax=ax)
ax.set_title("Feature importances using MDI")
ax.set_ylabel("Mean decrease in impurity")
fig.tight_layout()

# Predict on the held-out competition test set and build the submission frame.
test_pred = classifier.predict(df_test.to_numpy())
df_test_df = pd.DataFrame(test_pred)
a = df_test_original[['PassengerId']]
b = df_test_df.astype(bool)
pd.concat([a, b])
final_result = pd.concat([a, b], ignore_index=True, axis=1)
final_result.columns = ['PassengerId', 'Transported']
final_result
code
106198039/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

print_unique(df_train)
df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]
code
106198039/cell_11
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]

df_Y = df_train_Y.copy()
df = df_train_X_good_cols.copy()
df_test = df_test_X_good_cols.copy()

df[['CryoSleep']] = df[['CryoSleep']].fillna(value=df[['CryoSleep']].mode().iloc[0, 0])
df_test[['CryoSleep']] = df_test[['CryoSleep']].fillna(value=df_test[['CryoSleep']].mode().iloc[0, 0])
num_cols = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num_cols] = df[num_cols].fillna(value=df[num_cols].median())
df_test[num_cols] = df_test[num_cols].fillna(value=df_test[num_cols].median())

df['CryoSleep'] = df['CryoSleep'].astype(int)
df_test['CryoSleep'] = df_test['CryoSleep'].astype(int)
df_Y['Transported'] = df_Y['Transported'].astype(int)

X_train, X_test, y_train, y_test = train_test_split(df.to_numpy(), df_Y.to_numpy(), test_size=0.1)

classifier = RandomForestClassifier(n_estimators=1000)
classifier.fit(X_train, y_train.ravel())
y_pred = classifier.predict(X_test)
print('ACCURACY OF THE MODEL: ', metrics.accuracy_score(y_test, y_pred))
code
106198039/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train
a = df_test_original[['PassengerId']]
a
code
106198039/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106198039/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]
print_unique(df_train_X_good_cols)
code
106198039/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]

df_Y = df_train_Y.copy()
df = df_train_X_good_cols.copy()
df_test = df_test_X_good_cols.copy()

df[['CryoSleep']] = df[['CryoSleep']].fillna(value=df[['CryoSleep']].mode().iloc[0, 0])
df_test[['CryoSleep']] = df_test[['CryoSleep']].fillna(value=df_test[['CryoSleep']].mode().iloc[0, 0])
num_cols = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num_cols] = df[num_cols].fillna(value=df[num_cols].median())
df_test[num_cols] = df_test[num_cols].fillna(value=df_test[num_cols].median())

df['CryoSleep'] = df['CryoSleep'].astype(int)
df_test['CryoSleep'] = df_test['CryoSleep'].astype(int)
df_Y['Transported'] = df_Y['Transported'].astype(int)

print('\ndf')
print_unique(df)
print('\ndf_Y')
print_unique(df_Y)
print('\ndf_test')
print_unique(df_test)
df.info()
df_Y.info()
code
106198039/cell_3
[ "text_plain_output_1.png" ]
!head /kaggle/input/spaceship-titanic/train.csv
code
106198039/cell_17
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]

df_Y = df_train_Y.copy()
df = df_train_X_good_cols.copy()
df_test = df_test_X_good_cols.copy()

df[['CryoSleep']] = df[['CryoSleep']].fillna(value=df[['CryoSleep']].mode().iloc[0, 0])
df_test[['CryoSleep']] = df_test[['CryoSleep']].fillna(value=df_test[['CryoSleep']].mode().iloc[0, 0])
num_cols = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num_cols] = df[num_cols].fillna(value=df[num_cols].median())
df_test[num_cols] = df_test[num_cols].fillna(value=df_test[num_cols].median())

df['CryoSleep'] = df['CryoSleep'].astype(int)
df_test['CryoSleep'] = df_test['CryoSleep'].astype(int)
df_Y['Transported'] = df_Y['Transported'].astype(int)

X_train, X_test, y_train, y_test = train_test_split(df.to_numpy(), df_Y.to_numpy(), test_size=0.1)

classifier = RandomForestClassifier(n_estimators=1000)
classifier.fit(X_train, y_train.ravel())
y_pred = classifier.predict(X_test)

importances = classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in classifier.estimators_], axis=0)
forest_importances = pd.Series(importances, index=good_columns)
fig, ax = plt.subplots()
forest_importances.plot.bar(yerr=std, ax=ax)
ax.set_title("Feature importances using MDI")
ax.set_ylabel("Mean decrease in impurity")
fig.tight_layout()

test_pred = classifier.predict(df_test.to_numpy())
df_test_df = pd.DataFrame(test_pred)
a = df_test_original[['PassengerId']]
b = df_test_df.astype(bool)
pd.concat([a, b])
code
106198039/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train
df_train.info(verbose=True)
code
106198039/cell_12
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train

def print_unique(df):
    for col in df:
        unique = df[col].unique()
        max_len = 6
        if len(unique) <= max_len:
            unique = [f'{u}({(df[col] == u).sum()})' for u in unique]
        if len(unique) > max_len:
            unique = list(unique[:max_len]) + [f'...({len(unique) - max_len})']
        print(col, unique)

df_train_Y = df_train[['Transported']]
good_columns = ['CryoSleep', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df_train_X_good_cols = df_train[good_columns]
df_test_X_good_cols = df_test[good_columns]

df_Y = df_train_Y.copy()
df = df_train_X_good_cols.copy()
df_test = df_test_X_good_cols.copy()

df[['CryoSleep']] = df[['CryoSleep']].fillna(value=df[['CryoSleep']].mode().iloc[0, 0])
df_test[['CryoSleep']] = df_test[['CryoSleep']].fillna(value=df_test[['CryoSleep']].mode().iloc[0, 0])
num_cols = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num_cols] = df[num_cols].fillna(value=df[num_cols].median())
df_test[num_cols] = df_test[num_cols].fillna(value=df_test[num_cols].median())

df['CryoSleep'] = df['CryoSleep'].astype(int)
df_test['CryoSleep'] = df_test['CryoSleep'].astype(int)
df_Y['Transported'] = df_Y['Transported'].astype(int)

X_train, X_test, y_train, y_test = train_test_split(df.to_numpy(), df_Y.to_numpy(), test_size=0.1)

classifier = RandomForestClassifier(n_estimators=1000)
classifier.fit(X_train, y_train.ravel())
y_pred = classifier.predict(X_test)

# Mean-decrease-in-impurity importances, with the std across trees as error bars.
importances = classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in classifier.estimators_], axis=0)
forest_importances = pd.Series(importances, index=good_columns)
fig, ax = plt.subplots()
forest_importances.plot.bar(yerr=std, ax=ax)
ax.set_title('Feature importances using MDI')
ax.set_ylabel('Mean decrease in impurity')
fig.tight_layout()
code
106198039/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_filename = '/kaggle/input/spaceship-titanic/train.csv'
test_filename = '/kaggle/input/spaceship-titanic/test.csv'
df_train = pd.read_csv(train_filename)
df_test = pd.read_csv(test_filename)
df_test_original = df_test.copy()
df_train
code
105216211/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105216211/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)
X_full.info()
X_full.head()
code
105216211/cell_18
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.metrics import f1_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
import datetime
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)

# Convert the two date columns to "days before today".
X_full['created_at'] = pd.to_datetime(X_full['created_at'])
X_full['signup_date'] = pd.to_datetime(X_full['signup_date'])
dt = datetime.date.today()
today = pd.DatetimeIndex([dt])[0]
X_full['signup_date'] = today - X_full['signup_date']
X_full['created_at'] = today - X_full['created_at']
X_full['created_at'] = (X_full['created_at'] / np.timedelta64(1, 'D')).astype(int, errors='ignore')
X_full['signup_date'] = (X_full['signup_date'] / np.timedelta64(1, 'D')).astype(int, errors='ignore')

# X_train/X_valid/y_train/y_valid are presumably created by a split in an earlier cell not included in this dump.
cols = X_train.columns
imp = SimpleImputer(strategy='constant')
scaler = MinMaxScaler()
X_train = imp.fit_transform(X_train)
X_valid = imp.transform(X_valid)
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_train = pd.DataFrame(X_train, columns=[cols])
X_valid = pd.DataFrame(X_valid, columns=[cols])

model = MLPClassifier(random_state=1, hidden_layer_sizes=(30, 20), max_iter=250)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
F1 = f1_score(y_valid, preds)

# Apply the same transformations to the test set and predict.
ID = X_test.index
X_test['created_at'] = pd.to_datetime(X_test['created_at'])
X_test['signup_date'] = pd.to_datetime(X_test['signup_date'])
X_test['signup_date'] = today - X_test['signup_date']
X_test['created_at'] = today - X_test['created_at']
X_test['created_at'] = (X_test['created_at'] / np.timedelta64(1, 'D')).astype(int, errors='ignore')
X_test['signup_date'] = (X_test['signup_date'] / np.timedelta64(1, 'D')).astype(int, errors='ignore')
X_test = imp.transform(X_test)
X_test = scaler.transform(X_test)
X_test = pd.DataFrame(X_test, columns=[cols])
preds = model.predict(X_test)
code
105216211/cell_15
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
import datetime
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)

# Convert the date columns to time deltas relative to today.
X_full['created_at'] = pd.to_datetime(X_full['created_at'])
X_full['signup_date'] = pd.to_datetime(X_full['signup_date'])
dt = datetime.date.today()
today = pd.DatetimeIndex([dt])[0]
X_full['signup_date'] = today - X_full['signup_date']
X_full['created_at'] = today - X_full['created_at']

# X_train/X_valid are presumably created by a split in an earlier cell not included in this dump.
cols = X_train.columns
imp = SimpleImputer(strategy='constant')
scaler = MinMaxScaler()
X_train = imp.fit_transform(X_train)
X_valid = imp.transform(X_valid)
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_train = pd.DataFrame(X_train, columns=[cols])
X_valid = pd.DataFrame(X_valid, columns=[cols])
X_train.describe()
X_train.head()
code
105216211/cell_16
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.metrics import f1_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
import datetime
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)

X_full['created_at'] = pd.to_datetime(X_full['created_at'])
X_full['signup_date'] = pd.to_datetime(X_full['signup_date'])
dt = datetime.date.today()
today = pd.DatetimeIndex([dt])[0]
X_full['signup_date'] = today - X_full['signup_date']
X_full['created_at'] = today - X_full['created_at']

# X_train/X_valid/y_train/y_valid are presumably created by a split in an earlier cell not included in this dump.
cols = X_train.columns
imp = SimpleImputer(strategy='constant')
scaler = MinMaxScaler()
X_train = imp.fit_transform(X_train)
X_valid = imp.transform(X_valid)
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_train = pd.DataFrame(X_train, columns=[cols])
X_valid = pd.DataFrame(X_valid, columns=[cols])

model = MLPClassifier(random_state=1, hidden_layer_sizes=(30, 20), max_iter=250)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
F1 = f1_score(y_valid, preds)
print(F1)
code
105216211/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)
X_full.info()
X_full.head()
code
105216211/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '../input/analyticsvjobathon/train_wn75k28.csv'
lead_data = pd.read_csv(file_path, index_col='id')
file_path2 = '../input/analyticsvjobathon/test_Wf7sxXF.csv'
X_test = pd.read_csv(file_path2, index_col='id')

X = lead_data.copy()
y = X.buy
X_full = X.drop(['buy'], axis=1)
X_full.info()
code
18114963/cell_7
[ "image_output_1.png" ]
import bs4
import pandas as pd
import requests

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]

# Group paragraphs into speaker turns: a new turn starts when the first token
# ends with ':' (or is followed by '(?):'). Continuation paragraphs reuse the
# previous name, seq, and num_words values.
seq = 0
data = []
for i in utterances:
    graph = i.split()
    if graph[0][-1] == ':':
        text = ' '.join(graph[1:])
        num_words = len(graph) - 1
        name = graph[0][:-1]
        seq += 1
    elif len(graph) > 1 and graph[1] == '(?):':
        text = ' '.join(graph[2:])
        num_words = len(graph) - 2
        name = graph[0]
        seq += 1
    else:
        text = ' '.join(graph)
    data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words})

df = pd.DataFrame(data)
df.name.unique()
code
18114963/cell_15
[ "image_output_1.png" ]
from wordcloud import WordCloud
import bs4
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import sklearn.feature_extraction.text as skt

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]

seq = 0
data = []
for i in utterances:
    graph = i.split()
    if graph[0][-1] == ':':
        text = ' '.join(graph[1:])
        num_words = len(graph) - 1
        name = graph[0][:-1]
        seq += 1
    elif len(graph) > 1 and graph[1] == '(?):':
        text = ' '.join(graph[2:])
        num_words = len(graph) - 2
        name = graph[0]
        seq += 1
    else:
        text = ' '.join(graph)
    data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words})

df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))

def processing(x):
    return pd.Series({'graph': f"{' '.join(x['graph'])}", 'name': x['name'].iloc[0]})

df = df.groupby('seq').apply(processing)

def topwords_candidate(candidate_name, n):
    # TF-IDF over all of a candidate's utterances; scores the words of the first document.
    vectorizer = skt.TfidfVectorizer(stop_words='english')
    X = vectorizer.fit_transform(df[df['name'] == candidate_name]['graph'])
    feature_names = vectorizer.get_feature_names()
    doc = 0
    feature_index = X[doc, :].nonzero()[1]
    tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
    scored_features = sorted([(feature_names[i], s) for i, s in tfidf_scores], key=lambda x: x[1])
    data = scored_features[-n:]
    wordcloud = WordCloud().generate(' '.join([x[0] for x in data][::-1]))
    plt.axis('off')
    return (data, wordcloud)

topwords_candidate('SANDERS', 10)
code
18114963/cell_3
[ "image_output_1.png" ]
import requests

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
code
18114963/cell_17
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import bs4
import matplotlib.pyplot as plt
import pandas as pd
import requests
import sklearn.feature_extraction.text as skt

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]

seq = 0
data = []
for i in utterances:
    graph = i.split()
    if graph[0][-1] == ':':
        text = ' '.join(graph[1:])
        num_words = len(graph) - 1
        name = graph[0][:-1]
        seq += 1
    elif len(graph) > 1 and graph[1] == '(?):':
        text = ' '.join(graph[2:])
        num_words = len(graph) - 2
        name = graph[0]
        seq += 1
    else:
        text = ' '.join(graph)
    data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words})

df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))

def processing(x):
    return pd.Series({'graph': f"{' '.join(x['graph'])}", 'name': x['name'].iloc[0]})

df = df.groupby('seq').apply(processing)

def topwords_candidate(candidate_name, n):
    vectorizer = skt.TfidfVectorizer(stop_words='english')
    X = vectorizer.fit_transform(df[df['name'] == candidate_name]['graph'])
    feature_names = vectorizer.get_feature_names()
    doc = 0
    feature_index = X[doc, :].nonzero()[1]
    tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
    scored_features = sorted([(feature_names[i], s) for i, s in tfidf_scores], key=lambda x: x[1])
    data = scored_features[-n:]
    wordcloud = WordCloud().generate(' '.join([x[0] for x in data][::-1]))
    plt.axis('off')
    return (data, wordcloud)

# A 2x5 grid of word clouds, one per candidate (moderators excluded).
figs, axs = plt.subplots(2, 5, figsize=(80, 20))
candidates = list(filter(lambda x: x not in ['BASH', 'TAPPER', 'LEMON'], df.name.unique()))
for i in range(5):
    axs[0][i].imshow(topwords_candidate(candidates[i], 10)[1])
    axs[0][i].axis('off')
    axs[0][i].set_title(candidates[i], fontsize=40)
    axs[1][i].imshow(topwords_candidate(candidates[i + 5], 10)[1])
    axs[1][i].axis('off')
    axs[1][i].set_title(candidates[i + 5], fontsize=40)
code
18114963/cell_10
[ "text_plain_output_1.png" ]
import bs4
import pandas as pd
import requests

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]

seq = 0
data = []
for i in utterances:
    graph = i.split()
    if graph[0][-1] == ':':
        text = ' '.join(graph[1:])
        num_words = len(graph) - 1
        name = graph[0][:-1]
        seq += 1
    elif len(graph) > 1 and graph[1] == '(?):':
        text = ' '.join(graph[2:])
        num_words = len(graph) - 2
        name = graph[0]
        seq += 1
    else:
        text = ' '.join(graph)
    data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words})

df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df.groupby('name').sum()['num_words'].plot(kind='bar')
code
18114963/cell_12
[ "text_plain_output_1.png" ]
import bs4
import pandas as pd
import requests

r = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r.status_code
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]

seq = 0
data = []
for i in utterances:
    graph = i.split()
    if graph[0][-1] == ':':
        text = ' '.join(graph[1:])
        num_words = len(graph) - 1
        name = graph[0][:-1]
        seq += 1
    elif len(graph) > 1 and graph[1] == '(?):':
        text = ' '.join(graph[2:])
        num_words = len(graph) - 2
        name = graph[0]
        seq += 1
    else:
        text = ' '.join(graph)
    data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words})

df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))

def processing(x):
    return pd.Series({'graph': f"{' '.join(x['graph'])}", 'name': x['name'].iloc[0]})

df = df.groupby('seq').apply(processing)
df.head()
code
2033155/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Pakistan Intellectual Capital - Computer Science - Ver 1.csv', encoding='ISO-8859-1')
df['University Currently Teaching'].value_counts()[:20].plot(kind='bar')
code
2033155/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Pakistan Intellectual Capital - Computer Science - Ver 1.csv', encoding='ISO-8859-1')
df.head()
code
2033155/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Pakistan Intellectual Capital - Computer Science - Ver 1.csv', encoding='ISO-8859-1')
df_new = df[df['Other Information'].isin(['On Study Leave', 'On study leave', 'PhD Study Leave', 'On Leave'])]
x = df_new['Teacher Name'].count()
y = df['Teacher Name'].count() - x
df_new = df[df['Year'].isin([2013, 2014, 2015, 2016, 2017])]
df_new = df_new[df_new['Terminal Degree'].isin(['PhD', 'Ph.D', 'Phd'])]
df_new['Year'].value_counts().plot(kind='bar')
code
2033155/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

plt.rcParams['figure.figsize'] = (12, 5)
df = pd.read_csv('../input/Pakistan Intellectual Capital - Computer Science - Ver 1.csv', encoding='ISO-8859-1')

# Share of faculty currently on (study) leave vs. available.
df_new = df[df['Other Information'].isin(['On Study Leave', 'On study leave', 'PhD Study Leave', 'On Leave'])]
x = df_new['Teacher Name'].count()
y = df['Teacher Name'].count() - x
plt.pie([x, y], explode=(0.02, 0.09), labels=['On Leave', 'Available'], autopct='%1.1f%%', startangle=140)
plt.axis('equal')
plt.show()
code
128012764/cell_20
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K

breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')

# 70/15/15 train/validation/test split.
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)

datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label',
                                              target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label',
                                            target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label',
                                             target_size=target_size, batch_size=batch_size, class_mode='raw')

# Class weights inversely proportional to class frequency.
A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    # F1 computed from batch-level precision and recall.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(learning_rate, units):
    # Frozen VGG16 backbone with a small dense head on top.
    pre = VGG16(input_shape=(50, 50, 3), include_top=False, pooling='max')
    for layer in pre.layers:
        layer.trainable = False
    x = tf.keras.layers.Dense(units, activation='relu')(pre.output)
    x = tf.keras.layers.Dense(units // 2, activation='relu')(x)  # integer division; Dense expects an int
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    transfer_learning_model = tf.keras.models.Model(pre.input, out)
    transfer_learning_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                                    loss=tf.keras.losses.BinaryCrossentropy(),
                                    metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.Precision(), f1_score])
    history = transfer_learning_model.fit(train_generator, epochs=300, validation_data=val_generator,
                                          callbacks=[tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001)])
    test_results = transfer_learning_model.evaluate(test_generator)
    return (transfer_learning_model, history, test_results)

# `model`, `history` and `test_results` come from the training cell (cell_15).
model.save('transfer_learning_modelVGG16.h5')
with open('transfer_learning_historyVGG16.pkl', 'wb') as file:
    pickle.dump(history.history, file)
with open('transfer_learning_test_resultsVGG16.pkl', 'wb') as file:
    pickle.dump(test_results, file)

# Parameter count and an estimate of multiply-accumulates over the conv layers.
total_parameters = model.count_params()
mult_adds_total = 0
for layer in model.layers:
    if isinstance(layer, tf.keras.layers.Conv2D):
        height, width, channels_in = layer.input_shape[1:]
        _, _, channels_out = layer.output_shape[1:]
        kernel_height, kernel_width = layer.kernel_size
        mult_adds = height * width * channels_in * channels_out * kernel_height * kernel_width
        mult_adds_total += mult_adds
print('Total parameters:', total_parameters)
print('Total number of multiply-accumulates:', mult_adds_total)
code
128012764/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import glob
import random
import tensorflow as tf
import keras.utils as image

random.seed(42)
tf.random.set_seed(42)

from tensorflow.keras import layers
from tensorflow.keras.applications import VGG16
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pickle
import keras_tuner as kt
import tensorflow.keras.backend as K
code
128012764/cell_18
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K

breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')

train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)

datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label',
                                              target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label',
                                            target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label',
                                             target_size=target_size, batch_size=batch_size, class_mode='raw')

A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(learning_rate, units):
    pre = VGG16(input_shape=(50, 50, 3), include_top=False, pooling='max')
    for layer in pre.layers:
        layer.trainable = False
    x = tf.keras.layers.Dense(units, activation='relu')(pre.output)
    x = tf.keras.layers.Dense(units // 2, activation='relu')(x)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    transfer_learning_model = tf.keras.models.Model(pre.input, out)
    transfer_learning_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                                    loss=tf.keras.losses.BinaryCrossentropy(),
                                    metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.Precision(), f1_score])
    history = transfer_learning_model.fit(train_generator, epochs=300, validation_data=val_generator,
                                          callbacks=[tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001)])
    test_results = transfer_learning_model.evaluate(test_generator)
    return (transfer_learning_model, history, test_results)

# `test_results` comes from the training cell; evaluate() returns metrics in
# compile order: loss, recall, precision, f1.
print('Test Results')
print('\n-------------\n')
print('Test Loss:', format(test_results[0], '.3f'))
print('Test Recall:', format(test_results[1], '.3f'))
print('Test Precision:', format(test_results[2], '.3f'))
print('Test F1:', format(test_results[3], '.3f'))
code
128012764/cell_15
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K

breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')

train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)

datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label',
                                              target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label',
                                            target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label',
                                             target_size=target_size, batch_size=batch_size, class_mode='raw')

A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(learning_rate, units):
    pre = VGG16(input_shape=(50, 50, 3), include_top=False, pooling='max')
    for layer in pre.layers:
        layer.trainable = False
    x = tf.keras.layers.Dense(units, activation='relu')(pre.output)
    x = tf.keras.layers.Dense(units // 2, activation='relu')(x)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    transfer_learning_model = tf.keras.models.Model(pre.input, out)
    transfer_learning_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                                    loss=tf.keras.losses.BinaryCrossentropy(),
                                    metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.Precision(), f1_score])
    history = transfer_learning_model.fit(train_generator, epochs=300, validation_data=val_generator,
                                          callbacks=[tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001)])
    test_results = transfer_learning_model.evaluate(test_generator)
    return (transfer_learning_model, history, test_results)

# Train with the tuned hyperparameters.
best_params = {'units': 100, 'learning_rate': 0.001}
model, history, test_results = train_best_model(**best_params)
code
128012764/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K

breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')

train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)

datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label',
                                              target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label',
                                            target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label',
                                             target_size=target_size, batch_size=batch_size, class_mode='raw')

A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(learning_rate, units):
    pre = VGG16(input_shape=(50, 50, 3), include_top=False, pooling='max')
    for layer in pre.layers:
        layer.trainable = False
    x = tf.keras.layers.Dense(units, activation='relu')(pre.output)
    x = tf.keras.layers.Dense(units // 2, activation='relu')(x)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    transfer_learning_model = tf.keras.models.Model(pre.input, out)
    transfer_learning_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                                    loss=tf.keras.losses.BinaryCrossentropy(),
                                    metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.Precision(), f1_score])
    history = transfer_learning_model.fit(train_generator, epochs=300, validation_data=val_generator,
                                          callbacks=[tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001)])
    test_results = transfer_learning_model.evaluate(test_generator)
    return (transfer_learning_model, history, test_results)

# `model`, `history` and `test_results` come from the training cell (cell_15).
model.save('transfer_learning_modelVGG16.h5')
with open('transfer_learning_historyVGG16.pkl', 'wb') as file:
    pickle.dump(history.history, file)
with open('transfer_learning_test_resultsVGG16.pkl', 'wb') as file:
    pickle.dump(test_results, file)

def plot_the_results(history):
    # One figure per metric: F1, precision, recall (training vs. validation).
    plt.style.use('seaborn')
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_f1_score'], label='Val F1 Score', linewidth=2)
    plt.plot(history.epoch, history.history['f1_score'], label='f1_score', linewidth=2)
    plt.legend()
    plt.title('F1 Score')
    plt.show()
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_precision'], label='Val Precision', linewidth=2)
    plt.plot(history.epoch, history.history['precision'], label='Precision', linewidth=2)
    plt.legend()
    plt.title('Precision')
    plt.show()
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_recall'], label='Val Recall', linewidth=2)
    plt.plot(history.epoch, history.history['recall'], label='Recall', linewidth=2)
    plt.legend()
    plt.title('Recall')
    plt.show()

plot_the_results(history)
code
128012764/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import pandas as pd
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
code
105173129/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()

def get_continent_country(x):
    NorthAmerica = ['Barbados', 'Bermuda', 'Canada', 'Greenland', 'Jamaica', 'Mexico', 'Panama', 'Costa Rica', 'Dominican Republic', 'Guatemala', 'Puerto Rico', 'United States', 'Bahamas', 'Cuba', 'Honduras']
    SouthAmerica = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Peru', 'Uruguay', 'Venezuela', 'Guyana', 'Paraguay']
    Europe = ['Andorra', 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom', 'Monaco']
    Africa = ['Benin', 'Cameroon', 'Central African Republic', 'Democratic Republic of Congo', 'Ghana', 'Liberia', 'Morocco', 'Nigeria', 'South Africa', 'Sudan']
    Asia = ['Cyprus', 'India', 'Israel', 'Japan', 'Lebanon', 'Philippines', 'Qatar', 'Russia', 'Saudi Arabia', 'Singapore', 'South Korea', 'Taiwan', 'Thailand', 'Turkey', 'United Arab Emirates', 'Indonesia', 'Iran']
    Oceania = ['Australia', 'New Zealand', 'New Caledonia']
    if x in NorthAmerica:
        return 'North America'
    elif x in SouthAmerica:
        return 'South America'
    elif x in Europe:
        return 'Europe'
    elif x in Africa:
        return 'Africa'
    elif x in Asia:
        return 'Asia'
    else:
        return 'Oceania'
continent = []
for country in monkeypox_countr['location']:
    y = get_continent_country(country)
    continent.append(y)
monkeypox_countr['Continent'] = pd.Series(continent)
monkeypox_cont = monkeypox_countr[['Continent', 'location', 'date', 'new_cases', 'new_cases_smoothed', 'total_cases', 'new_cases_per_million', 'total_cases_per_million', 'new_cases_smoothed_per_million', 'new_deaths', 'new_deaths_smoothed', 'total_deaths', 'new_deaths_per_million', 'total_deaths_per_million', 'new_deaths_smoothed_per_million']].copy()
total_cases_continent = monkeypox_cont.set_index('Continent').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)
total_cases_country = monkeypox_cont.set_index('location').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)[:20]
Europe = monkeypox_cont[monkeypox_cont['Continent'] == 'Europe'].copy()
Europe['new_cases_%'] = Europe['new_cases'] / np.sum(Europe['new_cases'])
Europe = Europe.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:5]
print('Top 5 countries in Europe')
print(Europe)
print('------------------------------------')
NorthAmerica = monkeypox_cont[monkeypox_cont['Continent'] == 'North America'].copy()
NorthAmerica['new_cases_%'] = NorthAmerica['new_cases'] / np.sum(NorthAmerica['new_cases'])
NorthAmerica = NorthAmerica.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:3]
print('Top 3 countries in North America')
print(NorthAmerica)
code
105173129/cell_4
[ "image_output_1.png" ]
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.head()
code
105173129/cell_6
[ "image_output_1.png" ]
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
print(monkeypox.isnull().sum())
code
105173129/cell_8
[ "image_output_1.png" ]
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()
print('Countries :', monkeypox_countr.location.unique())
print('We have', len(monkeypox_countr.location.unique()), 'countries')
code
105173129/cell_15
[ "text_html_output_1.png" ]
from matplotlib.patches import ConnectionPatch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()

def get_continent_country(x):
    NorthAmerica = ['Barbados', 'Bermuda', 'Canada', 'Greenland', 'Jamaica', 'Mexico', 'Panama', 'Costa Rica', 'Dominican Republic', 'Guatemala', 'Puerto Rico', 'United States', 'Bahamas', 'Cuba', 'Honduras']
    SouthAmerica = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Peru', 'Uruguay', 'Venezuela', 'Guyana', 'Paraguay']
    Europe = ['Andorra', 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom', 'Monaco']
    Africa = ['Benin', 'Cameroon', 'Central African Republic', 'Democratic Republic of Congo', 'Ghana', 'Liberia', 'Morocco', 'Nigeria', 'South Africa', 'Sudan']
    Asia = ['Cyprus', 'India', 'Israel', 'Japan', 'Lebanon', 'Philippines', 'Qatar', 'Russia', 'Saudi Arabia', 'Singapore', 'South Korea', 'Taiwan', 'Thailand', 'Turkey', 'United Arab Emirates', 'Indonesia', 'Iran']
    Oceania = ['Australia', 'New Zealand', 'New Caledonia']
    if x in NorthAmerica:
        return 'North America'
    elif x in SouthAmerica:
        return 'South America'
    elif x in Europe:
        return 'Europe'
    elif x in Africa:
        return 'Africa'
    elif x in Asia:
        return 'Asia'
    else:
        return 'Oceania'
continent = []
for country in monkeypox_countr['location']:
    y = get_continent_country(country)
    continent.append(y)
monkeypox_countr['Continent'] = pd.Series(continent)
monkeypox_cont = monkeypox_countr[['Continent', 'location', 'date', 'new_cases', 'new_cases_smoothed', 'total_cases', 'new_cases_per_million', 'total_cases_per_million', 'new_cases_smoothed_per_million', 'new_deaths', 'new_deaths_smoothed', 'total_deaths', 'new_deaths_per_million', 'total_deaths_per_million', 'new_deaths_smoothed_per_million']].copy()
total_cases_continent = monkeypox_cont.set_index('Continent').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)
total_cases_country = monkeypox_cont.set_index('location').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)[:20]
Europe = monkeypox_cont[monkeypox_cont['Continent'] == 'Europe'].copy()
Europe['new_cases_%'] = Europe['new_cases'] / np.sum(Europe['new_cases'])
Europe = Europe.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:5]
NorthAmerica = monkeypox_cont[monkeypox_cont['Continent'] == 'North America'].copy()
NorthAmerica['new_cases_%'] = NorthAmerica['new_cases'] / np.sum(NorthAmerica['new_cases'])
NorthAmerica = NorthAmerica.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:3]

def graph_1():
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5))
    fig.subplots_adjust(wspace=0)
    plt.style.use('seaborn-colorblind')
    cases = list(total_cases_continent.values)
    labels = list(total_cases_continent.index)
    explode = [0.1, 0, 0, 0, 0, 0]
    angle = -270 * cases[0]
    wedges, *_ = ax1.pie(cases, autopct='%1.1f%%', startangle=angle, labels=labels, explode=explode, rotatelabels=True)
    age_ratios = list(Europe.values)
    age_labels = list(Europe.index)
    bottom = 0.82
    width = 0.2
    for j, (height, label) in enumerate(reversed([*zip(age_ratios, age_labels)])):
        bottom -= height
        bc = ax2.bar(0, height, width, bottom=bottom, color='C0', label=label, alpha=0.1 + 0.15 * j)
        ax2.bar_label(bc, labels=[f'{height:.1%}'], label_type='center')
    ax1.set_title('Total cases in the World')
    ax2.set_title('% Cases in Europe: Top 5')
    ax2.legend()
    ax2.axis('off')
    ax2.set_xlim(-1.5 * width, 2.5 * width)
    theta1, theta2 = (wedges[0].theta1, wedges[0].theta2)
    center, r = (wedges[0].center, wedges[0].r)
    bar_height = sum(age_ratios)
    x = r * np.cos(np.pi / 180 * theta2) + center[0]
    y = r * np.sin(np.pi / 180 * theta2) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, bar_height), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    con.set_linewidth(4)
    ax2.add_artist(con)
    x = r * np.cos(np.pi / 180 * theta1) + center[0]
    y = r * np.sin(np.pi / 180 * theta1) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, 0), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    ax2.add_artist(con)
    con.set_linewidth(4)
    return plt.show()
graph_1()
code
105173129/cell_16
[ "text_plain_output_1.png" ]
from matplotlib.patches import ConnectionPatch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()

def get_continent_country(x):
    NorthAmerica = ['Barbados', 'Bermuda', 'Canada', 'Greenland', 'Jamaica', 'Mexico', 'Panama', 'Costa Rica', 'Dominican Republic', 'Guatemala', 'Puerto Rico', 'United States', 'Bahamas', 'Cuba', 'Honduras']
    SouthAmerica = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Peru', 'Uruguay', 'Venezuela', 'Guyana', 'Paraguay']
    Europe = ['Andorra', 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom', 'Monaco']
    Africa = ['Benin', 'Cameroon', 'Central African Republic', 'Democratic Republic of Congo', 'Ghana', 'Liberia', 'Morocco', 'Nigeria', 'South Africa', 'Sudan']
    Asia = ['Cyprus', 'India', 'Israel', 'Japan', 'Lebanon', 'Philippines', 'Qatar', 'Russia', 'Saudi Arabia', 'Singapore', 'South Korea', 'Taiwan', 'Thailand', 'Turkey', 'United Arab Emirates', 'Indonesia', 'Iran']
    Oceania = ['Australia', 'New Zealand', 'New Caledonia']
    if x in NorthAmerica:
        return 'North America'
    elif x in SouthAmerica:
        return 'South America'
    elif x in Europe:
        return 'Europe'
    elif x in Africa:
        return 'Africa'
    elif x in Asia:
        return 'Asia'
    else:
        return 'Oceania'
continent = []
for country in monkeypox_countr['location']:
    y = get_continent_country(country)
    continent.append(y)
monkeypox_countr['Continent'] = pd.Series(continent)
monkeypox_cont = monkeypox_countr[['Continent', 'location', 'date', 'new_cases', 'new_cases_smoothed', 'total_cases', 'new_cases_per_million', 'total_cases_per_million', 'new_cases_smoothed_per_million', 'new_deaths', 'new_deaths_smoothed', 'total_deaths', 'new_deaths_per_million', 'total_deaths_per_million', 'new_deaths_smoothed_per_million']].copy()
total_cases_continent = monkeypox_cont.set_index('Continent').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)
total_cases_country = monkeypox_cont.set_index('location').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)[:20]
Europe = monkeypox_cont[monkeypox_cont['Continent'] == 'Europe'].copy()
Europe['new_cases_%'] = Europe['new_cases'] / np.sum(Europe['new_cases'])
Europe = Europe.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:5]
NorthAmerica = monkeypox_cont[monkeypox_cont['Continent'] == 'North America'].copy()
NorthAmerica['new_cases_%'] = NorthAmerica['new_cases'] / np.sum(NorthAmerica['new_cases'])
NorthAmerica = NorthAmerica.set_index('location').groupby(level=0)['new_cases_%'].agg(np.sum).sort_values(ascending=False)[:3]

#PRINT GRAPH EUROPE
def graph_1():
    # make figure and assign axis objects
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5))
    fig.subplots_adjust(wspace=0)
    plt.style.use('seaborn-colorblind')
    # pie chart parameters
    cases = list(total_cases_continent.values)
    labels = list(total_cases_continent.index)
    explode = [0.1, 0, 0,0,0,0]
    # rotate so that first wedge is split by the x-axis
    angle = -270 * cases[0]
    wedges, *_ = ax1.pie(cases, autopct='%1.1f%%', startangle=angle, labels=labels, explode=explode, rotatelabels=True)
    # bar chart parameters Europe
    age_ratios = list(Europe.values)
    age_labels = list(Europe.index)
    bottom = .82
    width = .2
    # Adding from the top matches the legend. Europe
    for j, (height, label) in enumerate(reversed([*zip(age_ratios, age_labels)])):
        bottom -= height
        bc = ax2.bar(0, height, width, bottom=bottom, color='C0', label=label, alpha=0.1 + 0.15 * j)
        ax2.bar_label(bc, labels=[f"{height:.1%}"], label_type='center')
    ax1.set_title('Total cases in the World')
    ax2.set_title('% Cases in Europe: Top 5')
    ax2.legend()
    ax2.axis('off')
    ax2.set_xlim(- 1.5 * width, 2.5 * width)
    # use ConnectionPatch to draw lines between the two plots:Europe
    theta1, theta2 = wedges[0].theta1, wedges[0].theta2
    center, r = wedges[0].center, wedges[0].r
    bar_height = sum(age_ratios)
    # draw top connecting line
    x = r * np.cos(np.pi / 180 * theta2) + center[0]
    y = r * np.sin(np.pi / 180 * theta2) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, bar_height), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    con.set_linewidth(4)
    ax2.add_artist(con)
    # draw bottom connecting line
    x = r * np.cos(np.pi / 180 * theta1) + center[0]
    y = r * np.sin(np.pi / 180 * theta1) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, 0), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    ax2.add_artist(con)
    con.set_linewidth(4)
    return plt.show()
graph_1()

def graph_2():
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5))
    fig.subplots_adjust(wspace=0)
    plt.style.use('seaborn-colorblind')
    cases = list(total_cases_continent.values)
    labels = list(total_cases_continent.index)
    explode = [0, 0.1, 0, 0, 0, 0]
    angle = -88 * cases[0]
    wedges, labels, pct_texts, *_ = ax1.pie(cases, autopct='%1.1f%%', startangle=angle, labels=labels, explode=explode, rotatelabels=True, labeldistance=1)
    for label, pct_text in zip(labels, pct_texts):
        pct_text.set_rotation(label.get_rotation())
    north_ratios = list(NorthAmerica.values)
    north_labels = list(NorthAmerica.index)
    bottom = 1
    width = 0.2
    for q, (height, label) in enumerate(reversed([*zip(north_ratios, north_labels)])):
        bottom -= height
        bc = ax2.bar(0, height, width, bottom=bottom, color='C1', label=label, alpha=0.1 + 0.25 * q)
        ax2.bar_label(bc, labels=[f'{height:.1%}'], label_type='center')
    ax2.set_title('% Cases in North America: Top 3')
    ax2.legend()
    ax2.axis('off')
    ax2.set_xlim(-1.5 * width, 2.5 * width)
    theta1, theta2 = (wedges[1].theta1, wedges[1].theta2)
    center, r = (wedges[1].center, wedges[1].r)
    bar_height = sum(north_ratios)
    x = r * np.cos(np.pi / 180 * theta2) + center[0]
    y = r * np.sin(np.pi / 180 * theta2) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, bar_height), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    con.set_linewidth(4)
    ax2.add_artist(con)
    x = r * np.cos(np.pi / 180 * theta1) + center[0]
    y = r * np.sin(np.pi / 180 * theta1) + center[1]
    con = ConnectionPatch(xyA=(-width / 2, 0), coordsA=ax2.transData, xyB=(x, y), coordsB=ax1.transData)
    con.set_color([0, 0, 0])
    ax2.add_artist(con)
    con.set_linewidth(4)
    return plt.show()
graph_2()
code
105173129/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()

def get_continent_country(x):
    NorthAmerica = ['Barbados', 'Bermuda', 'Canada', 'Greenland', 'Jamaica', 'Mexico', 'Panama', 'Costa Rica', 'Dominican Republic', 'Guatemala', 'Puerto Rico', 'United States', 'Bahamas', 'Cuba', 'Honduras']
    SouthAmerica = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Peru', 'Uruguay', 'Venezuela', 'Guyana', 'Paraguay']
    Europe = ['Andorra', 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom', 'Monaco']
    Africa = ['Benin', 'Cameroon', 'Central African Republic', 'Democratic Republic of Congo', 'Ghana', 'Liberia', 'Morocco', 'Nigeria', 'South Africa', 'Sudan']
    Asia = ['Cyprus', 'India', 'Israel', 'Japan', 'Lebanon', 'Philippines', 'Qatar', 'Russia', 'Saudi Arabia', 'Singapore', 'South Korea', 'Taiwan', 'Thailand', 'Turkey', 'United Arab Emirates', 'Indonesia', 'Iran']
    Oceania = ['Australia', 'New Zealand', 'New Caledonia']
    if x in NorthAmerica:
        return 'North America'
    elif x in SouthAmerica:
        return 'South America'
    elif x in Europe:
        return 'Europe'
    elif x in Africa:
        return 'Africa'
    elif x in Asia:
        return 'Asia'
    else:
        return 'Oceania'
continent = []
for country in monkeypox_countr['location']:
    y = get_continent_country(country)
    continent.append(y)
monkeypox_countr['Continent'] = pd.Series(continent)
monkeypox_cont = monkeypox_countr[['Continent', 'location', 'date', 'new_cases', 'new_cases_smoothed', 'total_cases', 'new_cases_per_million', 'total_cases_per_million', 'new_cases_smoothed_per_million', 'new_deaths', 'new_deaths_smoothed', 'total_deaths', 'new_deaths_per_million', 'total_deaths_per_million', 'new_deaths_smoothed_per_million']].copy()
monkeypox_cont.head()
code
105173129/cell_12
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
monkeypox = pd.read_csv('../input/worldwide-monkeypox-daily-dataset/owid-monkeypox-data.csv')
monkeypox.replace('Congo', 'Democratic Republic of Congo', inplace=True)
monkeypox_countr = monkeypox[monkeypox['location'] != 'World'].copy()

def get_continent_country(x):
    NorthAmerica = ['Barbados', 'Bermuda', 'Canada', 'Greenland', 'Jamaica', 'Mexico', 'Panama', 'Costa Rica', 'Dominican Republic', 'Guatemala', 'Puerto Rico', 'United States', 'Bahamas', 'Cuba', 'Honduras']
    SouthAmerica = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Peru', 'Uruguay', 'Venezuela', 'Guyana', 'Paraguay']
    Europe = ['Andorra', 'Austria', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom', 'Monaco']
    Africa = ['Benin', 'Cameroon', 'Central African Republic', 'Democratic Republic of Congo', 'Ghana', 'Liberia', 'Morocco', 'Nigeria', 'South Africa', 'Sudan']
    Asia = ['Cyprus', 'India', 'Israel', 'Japan', 'Lebanon', 'Philippines', 'Qatar', 'Russia', 'Saudi Arabia', 'Singapore', 'South Korea', 'Taiwan', 'Thailand', 'Turkey', 'United Arab Emirates', 'Indonesia', 'Iran']
    Oceania = ['Australia', 'New Zealand', 'New Caledonia']
    if x in NorthAmerica:
        return 'North America'
    elif x in SouthAmerica:
        return 'South America'
    elif x in Europe:
        return 'Europe'
    elif x in Africa:
        return 'Africa'
    elif x in Asia:
        return 'Asia'
    else:
        return 'Oceania'
continent = []
for country in monkeypox_countr['location']:
    y = get_continent_country(country)
    continent.append(y)
monkeypox_countr['Continent'] = pd.Series(continent)
monkeypox_cont = monkeypox_countr[['Continent', 'location', 'date', 'new_cases', 'new_cases_smoothed', 'total_cases', 'new_cases_per_million', 'total_cases_per_million', 'new_cases_smoothed_per_million', 'new_deaths', 'new_deaths_smoothed', 'total_deaths', 'new_deaths_per_million', 'total_deaths_per_million', 'new_deaths_smoothed_per_million']].copy()
total_cases_continent = monkeypox_cont.set_index('Continent').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)
print('TOTAL CASES BY CONTINENT')
print(total_cases_continent)
print('------------------------------')
total_cases_country = monkeypox_cont.set_index('location').groupby(level=0)['new_cases'].agg(np.sum).sort_values(ascending=False)[:20]
print('TOTAL CASES BY COUNTRY: TOP 20')
print(total_cases_country)
code
18118979/cell_13
[ "text_html_output_1.png" ]
out_path = Path('./')
out_path.ls()
in_path = Path('../input/')
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20.0, max_zoom=1.1, max_lighting=0.0, max_warp=0.2, p_affine=0.75, p_lighting=0.0)
test = CustomImageList.from_csv_custom(path=in_path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=in_path, csv_name='train.csv').split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test).transform(tfms).databunch(bs=64, num_workers=0).normalize(imagenet_stats)
learner = cnn_learner(data, models.resnet50, metrics=error_rate, pretrained=False, model_dir='./')
learner.fit_one_cycle(25)
interp = ClassificationInterpretation.from_learner(learner)
losses, idxs = interp.top_losses()
interp.plot_top_losses(16, figsize=(15, 11))
code
18118979/cell_4
[ "image_output_1.png" ]
import pandas as pd
out_path = Path('./')
out_path.ls()
in_path = Path('../input/')
df = pd.read_csv(in_path / 'train.csv')
df.head(n=5)
code
18118979/cell_11
[ "image_output_1.png" ]
out_path = Path('./')
out_path.ls()
in_path = Path('../input/')
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20.0, max_zoom=1.1, max_lighting=0.0, max_warp=0.2, p_affine=0.75, p_lighting=0.0)
test = CustomImageList.from_csv_custom(path=in_path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=in_path, csv_name='train.csv').split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test).transform(tfms).databunch(bs=64, num_workers=0).normalize(imagenet_stats)
learner = cnn_learner(data, models.resnet50, metrics=error_rate, pretrained=False, model_dir='./')
learner.fit_one_cycle(25)
learner.recorder.plot_losses()
code
18118979/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import os
print(os.listdir('../input'))
code
18118979/cell_8
[ "image_output_1.png" ]
out_path = Path('./')
out_path.ls()
in_path = Path('../input/')
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20.0, max_zoom=1.1, max_lighting=0.0, max_warp=0.2, p_affine=0.75, p_lighting=0.0)
test = CustomImageList.from_csv_custom(path=in_path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=in_path, csv_name='train.csv').split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test).transform(tfms).databunch(bs=64, num_workers=0).normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(12, 9))
code
18118979/cell_10
[ "text_html_output_1.png" ]
out_path = Path('./')
out_path.ls()
in_path = Path('../input/')
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20.0, max_zoom=1.1, max_lighting=0.0, max_warp=0.2, p_affine=0.75, p_lighting=0.0)
test = CustomImageList.from_csv_custom(path=in_path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=in_path, csv_name='train.csv').split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test).transform(tfms).databunch(bs=64, num_workers=0).normalize(imagenet_stats)
learner = cnn_learner(data, models.resnet50, metrics=error_rate, pretrained=False, model_dir='./')
learner.fit_one_cycle(25)
code
34140599/cell_7
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import spacy
folder = '../input/nlp-getting-started'
test = pd.read_csv(os.path.join(folder, 'test.csv'), index_col='id')
train = pd.read_csv(os.path.join(folder, 'train.csv'), index_col='id')
X = train.drop(columns='target')
y = train['target']
nlp = spacy.load('en_core_web_sm')
doc = nlp(X['text'].iloc[0])
for token in doc:
    print(token.text, token.lemma_, token.dep_, token.pos_)
code
34140599/cell_3
[ "text_plain_output_1.png" ]
import os
import pandas as pd
folder = '../input/nlp-getting-started'
test = pd.read_csv(os.path.join(folder, 'test.csv'), index_col='id')
train = pd.read_csv(os.path.join(folder, 'train.csv'), index_col='id')
X = train.drop(columns='target')
y = train['target']
len(X)
code
34140599/cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd
folder = '../input/nlp-getting-started'
test = pd.read_csv(os.path.join(folder, 'test.csv'), index_col='id')
train = pd.read_csv(os.path.join(folder, 'train.csv'), index_col='id')
X = train.drop(columns='target')
y = train['target']
X['text'].iloc[0]
code
90152351/cell_23
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
my_model.fit(x_train, y_train)
my_model.score(x_valid, y_valid)
my_model = KNeighborsRegressor()
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
my_model = KNeighborsRegressor()
my_model.fit(x_train, y_train)
my_model.score(x_valid, y_valid)
code
90152351/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
my_model.fit(x_train, y_train)
my_model.score(x_valid, y_valid)
code
90152351/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsRegressor
df = pd.read_csv('../input/fe-course-data/customer.csv')
df.dtypes
code
90152351/cell_19
[ "text_plain_output_1.png" ]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
code
90152351/cell_22
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
my_model = XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=10, min_child_weight=5, n_estimators=150, nthread=-1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, subsample=1, eval_metric='mlogloss')
my_model.fit(x_train, y_train)
my_model.score(x_valid, y_valid)
my_model = KNeighborsRegressor()
PIPE = Pipeline(steps=[('preproc', StandardScaler()), ('model', my_model)])
PIPE.fit(x_train, y_train)
PIPE.score(x_valid, y_valid)
code
90152351/cell_10
[ "text_html_output_1.png" ]
from sklearn.feature_selection import mutual_info_regression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsRegressor
df = pd.read_csv('../input/fe-course-data/customer.csv')
df.dtypes
y = df['ClaimAmount']
X = df.drop(['Customer', 'ClaimAmount', 'Unnamed: 0'], axis=1, inplace=False).copy()
for col in X.select_dtypes(include=['object']):
    X[col], unique = X[col].factorize()
discrete_features = [True if X[col].dtypes == 'int64' else False for col in X.columns]
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features, random_state=0)
mi_scores = pd.Series(mi_scores, index=X.columns, name='MI')
mi_scores = mi_scores.sort_values(ascending=False)
mi_scores
code
90152351/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsRegressor
df = pd.read_csv('../input/fe-course-data/customer.csv')
df.dtypes
y = df['ClaimAmount']
X = df.drop(['Customer', 'ClaimAmount', 'Unnamed: 0'], axis=1, inplace=False).copy()
sns.relplot(x='Income', y='ClaimAmount', data=df)
code
90152351/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsRegressor
df = pd.read_csv('../input/fe-course-data/customer.csv')
df.head()
code
332834/cell_9
[ "image_output_11.png", "image_output_74.png", "image_output_82.png", "image_output_24.png", "image_output_46.png", "image_output_25.png", "text_plain_output_5.png", "image_output_77.png", "image_output_47.png", "text_plain_output_15.png", "image_output_78.png", "image_output_17.png", "image_output_30.png", "text_plain_output_9.png", "image_output_73.png", "image_output_72.png", "image_output_14.png", "image_output_59.png", "image_output_39.png", "image_output_28.png", "text_plain_output_20.png", "image_output_84.png", "image_output_81.png", "image_output_23.png", "image_output_34.png", "image_output_64.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_68.png", "image_output_75.png", "text_plain_output_14.png", "image_output_18.png", "text_plain_output_29.png", "image_output_58.png", "image_output_21.png", "text_plain_output_27.png", "image_output_52.png", "text_plain_output_10.png", "image_output_60.png", "text_plain_output_6.png", "image_output_7.png", "image_output_62.png", "text_plain_output_24.png", "text_plain_output_21.png", "image_output_56.png", "image_output_31.png", "text_plain_output_25.png", "image_output_65.png", "image_output_20.png", "text_plain_output_18.png", "image_output_69.png", "image_output_32.png", "image_output_53.png", "text_plain_output_3.png", "image_output_4.png", "image_output_51.png", "text_plain_output_22.png", "image_output_83.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_57.png", "text_plain_output_7.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_66.png", "text_plain_output_16.png", "image_output_16.png", "image_output_70.png", "text_plain_output_8.png", "text_plain_output_26.png", "image_output_67.png", "image_output_27.png", "image_output_54.png", "image_output_6.png", "image_output_45.png", "image_output_63.png", "image_output_71.png", "image_output_80.png", "text_plain_output_23.png", "image_output_12.png", "text_plain_output_28.png", "image_output_22.png", "text_plain_output_2.png", "image_output_55.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_76.png", "image_output_9.png", "image_output_19.png", "image_output_79.png", "image_output_61.png", "image_output_38.png", "image_output_26.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
code
332834/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
plt.legend()
plt.show()
code
332834/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = pd.to_numeric(hstry['profit'] < -5).astype(int) * 4 + pd.to_numeric(hstry['profit'].isin(range(-5, 1))).astype(int) * 3 + pd.to_numeric(hstry['profit'].isin(range(1, 6))).astype(int) * 2 + pd.to_numeric(hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna('0').astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna('0').astype(np.int64)
people2['profit'] = people2['profit'].fillna('0').astype(np.int64)
obs = ['group_1']
for i in range(1, 10):
    obs.append('char_' + str(i))
for x in obs:
    people2[x] = people2[x].fillna('type 0')
    people2[x] = people2[x].str.split(' ').str[1]
bools = []
for i in range(10, 38):
    bools.append('char_' + str(i))
for x in list(set(obs).union(set(bools))):
    people2[x] = pd.to_numeric(people2[x]).astype(int)
for x in list(set(obs).union(set(bools))):
    fig=plt.figure(dpi=100,figsize=(12,3))
    fig.suptitle(x, fontsize=15)
    plt.subplot(141)
    pos=people2[people2['prof_label']==1]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(),pos[x].max()+1))
    plt.title('Very Good')
    plt.subplot(142)
    pos=people2[people2['prof_label']==2]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(),pos[x].max()+1))
    plt.title('Good')
    plt.subplot(143)
    pos=people2[people2['prof_label']==3]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(),pos[x].max()+1))
    plt.title('Bad')
    plt.subplot(144)
    pos=people2[people2['prof_label']==4]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(),pos[x].max()+1))
    plt.title('Very Bad')
    plt.show()
for x in bools:
    print(x)
    pos = people2[people2[x] == 1]
    fig.suptitle(x, fontsize=15)
    plt.hist(pos['prof_label'], 4, range=(1, 5))
    plt.show()
code
332834/cell_6
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_4.png", "image_output_35.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "image_output_3.png", "image_output_29.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
plt.legend()
plt.show()
code
332834/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = pd.to_numeric(hstry['profit'] < -5).astype(int) * 4 + pd.to_numeric(hstry['profit'].isin(range(-5, 1))).astype(int) * 3 + pd.to_numeric(hstry['profit'].isin(range(1, 6))).astype(int) * 2 + pd.to_numeric(hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna('0').astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna('0').astype(np.int64)
people2['profit'] = people2['profit'].fillna('0').astype(np.int64)
obs = ['group_1']
for i in range(1, 10):
    obs.append('char_' + str(i))
for x in obs:
    people2[x] = people2[x].fillna('type 0')
    people2[x] = people2[x].str.split(' ').str[1]
bools = []
for i in range(10, 38):
    bools.append('char_' + str(i))
for x in list(set(obs).union(set(bools))):
    people2[x] = pd.to_numeric(people2[x]).astype(int)
for x in list(set(obs).union(set(bools))):
    fig = plt.figure(dpi=100, figsize=(12, 3))
    fig.suptitle(x, fontsize=15)
    plt.subplot(141)
    pos = people2[people2['prof_label'] == 1]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(), pos[x].max() + 1))
    plt.title('Very Good')
    plt.subplot(142)
    pos = people2[people2['prof_label'] == 2]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(), pos[x].max() + 1))
    plt.title('Good')
    plt.subplot(143)
    pos = people2[people2['prof_label'] == 3]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(), pos[x].max() + 1))
    plt.title('Bad')
    plt.subplot(144)
    pos = people2[people2['prof_label'] == 4]
    plt.hist(pos[x], len(pos[x].unique()), range=(pos[x].min(), pos[x].max() + 1))
    plt.title('Very Bad')
    plt.show()
code
332834/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'].unique()
code
332834/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
plt.hist(hstry['prof_label'], 4, range=(1, 5))
plt.show()
code
332834/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
code
332834/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'otcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['profit'].describe()
code
18135285/cell_4
[ "text_html_output_1.png" ]
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
sdf_train = spark.read.csv('../input/train.csv', inferSchema=True, header=True)
print(sdf_train.printSchema())
pdf = sdf_train.limit(5).toPandas()
pdf.T
code
18135285/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
import os
print(os.listdir('../input'))
code
18135285/cell_1
[ "text_plain_output_1.png" ]
! pip install pyspark
code
18135285/cell_3
[ "text_html_output_1.png" ]
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
code
18135285/cell_17
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
import os
print(os.listdir('submission'))
code
18135285/cell_14
[ "text_plain_output_1.png" ]
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
sdf_train = spark.read.csv('../input/train.csv', inferSchema=True, header=True)
pdf = sdf_train.limit(5).toPandas()
pdf.T
sdf_test = spark.read.csv('../input/test.csv', inferSchema=True, header=True)
pdf = sdf_test.limit(5).toPandas()
pdf.T
from pyspark.sql import functions as F
sdf_typecast = sdf_train.withColumn('Ticket', sdf_train['Ticket'].cast('double'))
sdf_typecast = sdf_typecast.fillna(0)
numeric_cols = ['PassengerId', 'Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare']
numeric_features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare']
sdf_train_subset = sdf_typecast.select(numeric_cols)
from pyspark.ml.feature import VectorAssembler
vectAssembler = VectorAssembler(inputCols=numeric_features, outputCol='vect_features')
from pyspark.ml.classification import DecisionTreeClassifier
dt = DecisionTreeClassifier(labelCol='Survived', featuresCol='vect_features')
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[vectAssembler, dt])
model = pipeline.fit(sdf_train_subset)
numeric_cols_test = ['PassengerId', 'Pclass', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare']
sdf_test_subset = sdf_test.withColumn('Ticket', sdf_test['Ticket'].cast('double')).fillna(0).select(numeric_cols_test)
sdf_predict = model.transform(sdf_test_subset)
pdf = sdf_predict.limit(10).toPandas()
pdf.T
code
18135285/cell_5
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
sdf_train = spark.read.csv('../input/train.csv', inferSchema=True, header=True)
pdf = sdf_train.limit(5).toPandas()
pdf.T
sdf_test = spark.read.csv('../input/test.csv', inferSchema=True, header=True)
pdf = sdf_test.limit(5).toPandas()
pdf.T
code
16169565/cell_13
[ "image_output_1.png" ]
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.layers.convolutional import Conv2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.pooling import MaxPooling2D
from keras.models import Sequential, model_from_json
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from matplotlib import pyplot as plt  # editor-added: plt is used below but was missing from this sliced cell
import cv2
import cv2
import numpy as np # linear algebra
import os
import os
NUM_CLASSES = 2  # editor-added: defined in cell_1 of this notebook; needed for the cell to run standalone
IMG_SIZE = 48  # editor-added: defined in cell_1 of this notebook

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)
    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0
    img = np.rollaxis(img,-1)
    return img

import cv2
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)

import cv2
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)

x_train_np = np.array(x_train, dtype='float32')
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]
random_array = np.random.randint(len(x_train), size=25)
random_array
grids = (5,5)
counter = 0
plt.figure(figsize=(10,10))
for i in range(0, 25):
    ax = plt.subplot(5, 5, i+1)
    img = img_data_train[random_array[i]]
    rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    ax = plt.imshow(rgb_img, cmap='gray')
    plt.title(CATEGORIES[y_train[random_array[i]]])
    plt.xticks([])
    plt.yticks([])

def cnn_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(3, IMG_SIZE, IMG_SIZE), activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    return model

def lr_schedule(epoch):
    return lr * 0.1 ** int(epoch / 10)

datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10.0)
datagen.fit(x_train_np)
model = cnn_model()
lr = 0.01
sgd = SGD(lr=lr, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

def lr_schedule(epoch):
    return lr * 0.1 ** int(epoch / 10)

batch_size = 32
nb_epoch = 20
model.fit_generator(datagen.flow(x_train_np, y_train_np, batch_size=batch_size), steps_per_epoch=x_train_np.shape[0], epochs=nb_epoch, validation_data=(x_test_np, y_test_np), callbacks=[LearningRateScheduler(lr_schedule), ModelCheckpoint('model.h5', save_best_only=True)])
random_array = np.random.randint(len(x_test), size=25)
random_array
grids = (5, 5)
counter = 0
plt.figure(figsize=(10, 10))
for i in range(0, 25):
    ax = plt.subplot(5, 5, i + 1)
    img = img_data_test[random_array[i]]
    rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    ax = plt.imshow(rgb_img, cmap='gray')
    x = x_test[random_array[i]]
    y_predict = np.argmax(model.predict(x.reshape(1, 3, 48, 48)), axis=1)
    plt.title(CATEGORIES[int(y_predict)])
    plt.xticks([])
    plt.yticks([])
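# --- Editor's addition (hedged sketch): reload the best checkpoint written by
# ModelCheckpoint('model.h5', save_best_only=True) above before any further
# evaluation, so the best validation epoch is used rather than the last one.
model.load_weights('model.h5')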
code
16169565/cell_6
[ "text_plain_output_1.png" ]
from skimage import io, color, exposure, transform
import cv2
import cv2
import numpy as np # linear algebra
import os
import os
NUM_CLASSES = 2  # editor-added: defined in cell_1 of this notebook; needed for the cell to run standalone
IMG_SIZE = 48  # editor-added: defined in cell_1 of this notebook

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)
    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0
    img = np.rollaxis(img,-1)
    return img

import cv2
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)

import cv2
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)

x_train_np = np.array(x_train, dtype='float32')
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]
random_array = np.random.randint(len(x_train), size=25)
random_array
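# --- Editor's addition (hedged sketch): shape/label sanity check for the
# arrays built above (channels-first images, one-hot labels).
print('train:', x_train_np.shape, y_train_np.shape)
print('test: ', x_test_np.shape, y_test_np.shape)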
code
16169565/cell_11
[ "text_plain_output_1.png" ]
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.layers.convolutional import Conv2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.pooling import MaxPooling2D
from keras.models import Sequential, model_from_json
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
import cv2
import cv2
import numpy as np # linear algebra
import os
import os
NUM_CLASSES = 2  # editor-added: defined in cell_1 of this notebook; needed for the cell to run standalone
IMG_SIZE = 48  # editor-added: defined in cell_1 of this notebook

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)
    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0
    img = np.rollaxis(img,-1)
    return img

import cv2
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)

import cv2
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)

x_train_np = np.array(x_train, dtype='float32')
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]

def cnn_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(3, IMG_SIZE, IMG_SIZE), activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    return model

def lr_schedule(epoch):
    return lr * 0.1 ** int(epoch / 10)

datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10.0)
datagen.fit(x_train_np)
model = cnn_model()
lr = 0.01
sgd = SGD(lr=lr, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

def lr_schedule(epoch):
    return lr * 0.1 ** int(epoch / 10)

batch_size = 32
nb_epoch = 20
model.fit_generator(datagen.flow(x_train_np, y_train_np, batch_size=batch_size), steps_per_epoch=x_train_np.shape[0], epochs=nb_epoch, validation_data=(x_test_np, y_test_np), callbacks=[LearningRateScheduler(lr_schedule), ModelCheckpoint('model.h5', save_best_only=True)])
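# --- Editor's addition (hedged sketch): score the trained network on the
# held-out arrays built above; with metrics=['accuracy'], evaluate() returns
# [loss, accuracy].
loss, acc = model.evaluate(x_test_np, y_test_np, verbose=0)
print('test loss: %.4f | test accuracy: %.4f' % (loss, acc))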
code
16169565/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from skimage import io, color, exposure, transform
import os
import glob
import h5py
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras import backend as K
K.set_image_data_format('channels_first')
from matplotlib import pyplot as plt
import os
print(os.listdir('../input'))
NUM_CLASSES = 2
IMG_SIZE = 48
code
16169565/cell_7
[ "image_output_1.png" ]
from skimage import io, color, exposure, transform
from matplotlib import pyplot as plt  # editor-added: plt is used below but was missing from this sliced cell
import cv2
import cv2
import numpy as np # linear algebra
import os
import os
NUM_CLASSES = 2  # editor-added: defined in cell_1 of this notebook; needed for the cell to run standalone
IMG_SIZE = 48  # editor-added: defined in cell_1 of this notebook

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)
    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0
    img = np.rollaxis(img,-1)
    return img

import cv2
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)

import cv2
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)

x_train_np = np.array(x_train, dtype='float32')
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]
random_array = np.random.randint(len(x_train), size=25)
random_array
grids = (5, 5)
counter = 0
plt.figure(figsize=(10, 10))
for i in range(0, 25):
    ax = plt.subplot(5, 5, i + 1)
    img = img_data_train[random_array[i]]
    rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    ax = plt.imshow(rgb_img, cmap='gray')
    plt.title(CATEGORIES[y_train[random_array[i]]])
    plt.xticks([])
    plt.yticks([])
code
16169565/cell_12
[ "text_plain_output_1.png" ]
from skimage import io, color, exposure, transform
import cv2
import cv2
import numpy as np # linear algebra
import os
import os
NUM_CLASSES = 2  # editor-added: defined in cell_1 of this notebook; needed for the cell to run standalone
IMG_SIZE = 48  # editor-added: defined in cell_1 of this notebook

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)
    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0
    img = np.rollaxis(img,-1)
    return img

import cv2
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)

import cv2
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)

x_train_np = np.array(x_train, dtype='float32')
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]
random_array = np.random.randint(len(x_train), size=25)
random_array
random_array = np.random.randint(len(x_test), size=25)
random_array
code