path: stringlengths 13–17
screenshot_names: sequencelengths 1–873
code: stringlengths 0–40.4k
cell_type: stringclasses (1 value)
105216483/cell_15
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & 
(odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) 
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), 
GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
churn_dataset_train['Churn'].value_counts()
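The flattened cell above ends by checking the class balance of the cleaned churn frame. A minimal sketch of the early cleanup it relies on (duplicate removal ignoring customer_id, zeroing rows where all three day-usage columns are negative, flipping remaining negatives), run on an invented toy frame rather than the Kaggle CSVs:

import pandas as pd

# Hypothetical toy frame standing in for the train.csv columns used above.
toy = pd.DataFrame({
    'customer_id': [1, 2, 3, 4],
    'total_day_min': [120.0, -5.0, 120.0, -30.0],
    'total_day_calls': [80, -2, 80, 50],
    'total_day_charge': [20.4, -1.0, 20.4, -3.0],
    'Churn': ['No', 'Yes', 'No', 'Yes'],
})

# Drop duplicates on every column except the identifier, as in the notebook.
subset = toy.columns.drop('customer_id')
deduped = toy.drop_duplicates(subset=subset)

# Rows where all three day-usage fields are negative are treated as zero usage;
# remaining stray negatives are assumed to be sign errors and flipped positive.
mask = (deduped['total_day_min'] < 0) & (deduped['total_day_calls'] < 0) & (deduped['total_day_charge'] < 0)
deduped.loc[mask, ['total_day_min', 'total_day_calls', 'total_day_charge']] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge']:
    deduped[col] = deduped[col].abs()

print(deduped['Churn'].value_counts())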
code
105216483/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & 
(odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) 
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), 
GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
not_churn_dataset_train['Churn'].value_counts()
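Most of this cell repeats the imputation pipeline; the recurring pattern there is sorting by a correlated column before forward-filling, which is easy to miss in the flattened text. A small sketch of that pattern with invented values (only the column names are taken from the notebook):

import numpy as np
import pandas as pd

# Invented values; only the sort-then-ffill pattern matches the notebook.
df = pd.DataFrame({
    'location_code': [445, 445, 452, 452],
    'total_day_charge': [10.0, 30.0, 12.0, 35.0],
    'total_day_min': [60.0, np.nan, 70.0, 210.0],
})

# Sort so that rows with a similar location and charge sit next to each other,
# then carry the previous (similar) row's minutes forward into the gap.
df = df.sort_values(['location_code', 'total_day_charge'])
df['total_day_min'] = df['total_day_min'].ffill()
df = df.sort_index()  # restore the original row order, as the notebook does at the end
print(df)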
code
105216483/cell_17
[ "text_plain_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & 
(odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) 
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), 
GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
    new_dataset_train = new_dataset_train.append(churn_dataset_train)
new_dataset_train
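cell_17 rebalances the training set by appending the churn rows to the non-churn rows three times. DataFrame.append was removed in pandas 2.0, so an equivalent sketch with pd.concat, on an invented toy frame:

import pandas as pd

train = pd.DataFrame({'feature': [1, 2, 3, 4, 5, 6],
                      'Churn': ['No', 'No', 'No', 'No', 'Yes', 'Yes']})

churn = train.loc[train.Churn == 'Yes']
not_churn = train.loc[train.Churn == 'No']

# Duplicate the minority class three extra times, matching the notebook's loop,
# then shuffle so the duplicated rows are not grouped together.
balanced = pd.concat([not_churn] + [churn] * 3, ignore_index=True)
balanced = balanced.sample(frac=1, random_state=42)
print(balanced['Churn'].value_counts())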
code
105216483/cell_31
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 
'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = 
odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', 
use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]

def evaluate_for_models(models, X, y):
    results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []})
    for model in models:
        score = cross_val_score(model, X, y, scoring='f1')
        new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
        results = results.append(new_result, ignore_index=True)
    return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])

dataset_train = data_path_train
dataset_test = data_path_test
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
    new_dataset_train = new_dataset_train.append(churn_dataset_train)
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts()
encoded_train = pd.get_dummies(dataset_train, columns=['location_code'])
encoded_test = pd.get_dummies(dataset_test, columns=['location_code'])
encoded_train['Churn'] = encoded_train['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
    encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0})
for col in ['intertiol_plan', 'voice_mail_plan']:
    encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0})
X = encoded_train.drop(columns=['Churn'])
y = encoded_train.Churn
scaler = StandardScaler()
stdscaled = X.copy(deep=True)
stdscaled[stdscaled.columns] = scaler.fit_transform(stdscaled[stdscaled.columns])
evaluate_for_models(models, stdscaled, y)
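The evaluate_for_models helper above also builds its results frame with DataFrame.append, which no longer exists in pandas 2.0 and later. A self-contained variant of the same comparison loop that collects rows in a list and builds the frame once, run here on a synthetic dataset from make_classification instead of the churn data:

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

def evaluate_for_models(models, X, y):
    # Collect one dict per model, then build the DataFrame in a single step.
    rows = []
    for model in models:
        score = cross_val_score(model, X, y, scoring='f1')
        rows.append({'Model': model.__class__.__name__,
                     'ScoreMean': score.mean(),
                     'Score Standard Deviation': score.std()})
    results = pd.DataFrame(rows)
    return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])

# Synthetic stand-in for the scaled churn features and binary labels.
X, y = make_classification(n_samples=300, n_features=10, random_state=42)
print(evaluate_for_models([DecisionTreeClassifier(random_state=42),
                           LogisticRegression(max_iter=1000)], X, y))

Building the frame from a list of dicts avoids repeatedly copying a growing DataFrame and works on both old and new pandas versions.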
code
105216483/cell_24
[ "text_plain_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0 
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = 
odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', 
use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]

def evaluate_for_models(models, X, y):
    results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []})
    for model in models:
        score = cross_val_score(model, X, y, scoring='f1')
        new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
        results = results.append(new_result, ignore_index=True)
    return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])

dataset_train = data_path_train
dataset_test = data_path_test
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
    new_dataset_train = new_dataset_train.append(churn_dataset_train)
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts()
encoded_train = pd.get_dummies(dataset_train, columns=['location_code'])
encoded_test = pd.get_dummies(dataset_test, columns=['location_code'])
encoded_train['Churn'] = encoded_train['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
    encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0})
for col in ['intertiol_plan', 'voice_mail_plan']:
    encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0})
encoded_test.tail()
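The encoding block above one-hot encodes location_code and maps the yes/no plan columns (and the lower-cased Churn label) to 1/0. The same transform on a small invented frame, keeping the notebook's column names:

import pandas as pd

toy = pd.DataFrame({
    'location_code': [445, 452, 547],
    'intertiol_plan': ['no', 'yes', 'no'],
    'voice_mail_plan': ['yes', 'no', 'no'],
    'Churn': ['No', 'Yes', 'No'],
})

# One-hot encode the location, then map the binary string columns to integers.
encoded = pd.get_dummies(toy, columns=['location_code'])
encoded['Churn'] = encoded['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
    encoded[col] = encoded[col].map({'yes': 1, 'no': 0})
print(encoded.tail())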
code
105216483/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & 
(odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) 
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), 
GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)] dataset_test = data_path_test dataset_test.head()
code
105216483/cell_27
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier import pandas as pd data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv' data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv' dataset_train = pd.read_csv(data_path_train) dataset_test = pd.read_csv(data_path_test) dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True) dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True) subset_train = dataset_train.columns.drop('customer_id') duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train) subset_test = dataset_test.columns.drop('customer_id') duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test) nan_added_dataset_train = duplicates_droped_dataset_train.copy() nan_added_dataset_test = duplicates_droped_dataset_test.copy() nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0 for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']: nan_added_dataset_train[col] = nan_added_dataset_train[col].abs() nan_added_dataset_test[col] = nan_added_dataset_test[col].abs() odm_handled_dataset_train = nan_added_dataset_train.copy() odm_handled_dataset_test = nan_added_dataset_test.copy() for col in ['account_length', 'location_code']: odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True) odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True) odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no' odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes' odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 
'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0 odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median() odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0 odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_train['total_day_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_train['total_day_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_train['total_day_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_train['total_eve_min'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_train['total_night_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_train['total_night_calls'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge']) odm_handled_dataset_test['total_day_min'].ffill(inplace=True) odm_handled_dataset_test = 
odm_handled_dataset_test.sort_values(['location_code', 'total_day_min']) odm_handled_dataset_test['total_day_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge']) odm_handled_dataset_test['total_day_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge']) odm_handled_dataset_test['total_eve_min'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min']) odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge']) odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge']) odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes']) odm_handled_dataset_test['total_night_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge']) odm_handled_dataset_test['total_night_calls'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge']) odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes']) odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True) odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge']) odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True) odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True) odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1 odm_handled_dataset_train = odm_handled_dataset_train.sort_index() odm_handled_dataset_test = odm_handled_dataset_test.sort_index() odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0 odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15 pre_processed_dataset_train = odm_handled_dataset_train pre_processed_dataset_test = odm_handled_dataset_test data_path_train = pre_processed_dataset_train data_path_test = pre_processed_dataset_test rs = 42 models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', 
use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)] def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score = cross_val_score(model, X, y, scoring='f1') new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean', 'Score Standard Deviation']) dataset_train = data_path_train dataset_test = data_path_test churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes'] not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No'] new_dataset_train = not_churn_dataset_train.copy(deep=True) for i in range(3): new_dataset_train = new_dataset_train.append(churn_dataset_train) new_dataset_train dataset_train = new_dataset_train.sample(frac=1, random_state=42) dataset_train['Churn'].value_counts() encoded_train = pd.get_dummies(dataset_train, columns=['location_code']) encoded_test = pd.get_dummies(dataset_test, columns=['location_code']) encoded_train['Churn'] = encoded_train['Churn'].str.lower() for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']: encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0}) X = encoded_train.drop(columns=['Churn']) y = encoded_train.Churn scaler = StandardScaler() stdscaled = X.copy(deep=True) stdscaled[stdscaled.columns] = scaler.fit_transform(stdscaled[stdscaled.columns]) stdscaled.head()
code
33095782/cell_13
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1)
code
33095782/cell_6
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.datasets import imdb import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) X
code
33095782/cell_1
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install --upgrade pandas-profiling !pip install --upgrade hypertools !pip install --upgrade pandas
code
33095782/cell_7
[ "text_plain_output_1.png" ]
from keras.datasets import imdb import matplotlib.pyplot as plt import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) print('Review length: ') result = list(map(len, X)) print('Mean %.2f words (%f)' % (np.mean(result), np.std(result))) fig, ax = plt.subplots(figsize=(10, 5)) ax.set_title('Boxplot of review lenght') ax.boxplot(result)
code
33095782/cell_18
[ "text_plain_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.convolutional import Convolution1D from keras.layers.convolutional import MaxPooling1D from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1) scores = model.evaluate(X_test, y_test, verbose=0) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Convolution1D(activation='relu', filters=vector_size, kernel_size=3, padding='same')) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(250, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1) scores = model.evaluate(X_test, y_test, verbose=0) print('Accuracy: %.2f%%' % (scores[1] * 100))
code
33095782/cell_16
[ "text_plain_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.convolutional import Convolution1D from keras.layers.convolutional import MaxPooling1D from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1) scores = model.evaluate(X_test, y_test, verbose=0) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Convolution1D(activation='relu', filters=vector_size, kernel_size=3, padding='same')) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(250, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary())
code
33095782/cell_17
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.convolutional import Convolution1D from keras.layers.convolutional import MaxPooling1D from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1) scores = model.evaluate(X_test, y_test, verbose=0) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Convolution1D(activation='relu', filters=vector_size, kernel_size=3, padding='same')) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(250, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1)
code
33095782/cell_14
[ "text_plain_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=128, verbose=1) scores = model.evaluate(X_test, y_test, verbose=0) print('Accuracy: %.2f%%' % (scores[1] * 100))
code
33095782/cell_10
[ "text_plain_output_1.png" ]
from keras.datasets import imdb from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) X_train[20]
code
33095782/cell_12
[ "text_plain_output_1.png" ]
from keras.datasets import imdb from keras.layers import Dense from keras.layers import Flatten from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing import sequence import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) top_words = 10000 vector_size = 32 max_review_lenght = 800 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) X_train = sequence.pad_sequences(X_train, maxlen=max_review_lenght) X_test = sequence.pad_sequences(X_test, maxlen=max_review_lenght) model = Sequential() model.add(Embedding(top_words, vector_size, input_length=max_review_lenght)) model.add(Flatten()) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(30, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary())
code
33095782/cell_5
[ "text_plain_output_1.png" ]
from keras.datasets import imdb import numpy as np (X_train, y_train), (X_test, y_test) = imdb.load_data() X = np.concatenate((X_train, X_test), axis=0) y = np.concatenate((y_train, y_test), axis=0) print('Training data: ') print(X.shape) print(y.shape) print('Classes: ') print(np.unique(y)) print('Number of words: ') print(len(np.unique(np.hstack(X))))
code
105195844/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing.info()
code
105195844/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum()
code
105195844/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.describe()
code
105195844/cell_25
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() from sklearn.preprocessing import LabelEncoder ocean_le = LabelEncoder() housing['ocean_proximity'] = ocean_le.fit_transform(housing['ocean_proximity']) ocean_le.classes_
code
105195844/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') x = housing.copy() x = x[x['total_bedrooms'] < 3000] x.shape
code
105195844/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing.hist(bins=50, figsize=(20, 15))
code
105195844/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing
code
105195844/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing.info()
code
105195844/cell_11
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape sns.heatmap(housing.isnull())
code
105195844/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() sns.heatmap(housing.isnull())
code
105195844/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
105195844/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.head(5)
code
105195844/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum()
code
105195844/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') x = housing.copy() x[x['total_bedrooms'] >= 3000].shape
code
105195844/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') housing.info()
code
105195844/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.info()
code
105195844/cell_15
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns
code
105195844/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns total_bedroom_median = housing['total_bedrooms'].median() total_bedroom_median
code
105195844/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') x = housing.copy() x = x[x['total_bedrooms'] < 3000] x.shape sns.scatterplot(x=x['total_bedrooms'], y=x['median_house_value'], color='brown')
code
105195844/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') x = housing.copy() sns.scatterplot(x=x['total_bedrooms'], y=x['median_house_value'])
code
105195844/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape
code
105195844/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns housing = pd.read_csv('../input/california-housing-prices/housing.csv', sep=',', encoding='utf-8') housing housing.shape housing.isnull().sum() housing.columns housing.isnull().sum() housing = housing.astype('float') x = housing.copy() x = x[x['total_bedrooms'] < 3000] x.shape sns.boxplot(housing['total_bedrooms'])
code
2025030/cell_9
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) sns.barplot(x='Parch', y='Survived', hue='Sex', data=data_train)
code
2025030/cell_20
[ "text_html_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_y = data_train['Survived'] train_y.sample(4)
code
2025030/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5)
code
2025030/cell_40
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import LabelEncoder from sklearn.svm import SVC import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_x = data_train[['Pclass', 'Sex', 'Family_Size']] train_x.sample(5) train_y = data_train['Survived'] train_y.sample(4) from sklearn.preprocessing import LabelEncoder lb_make = LabelEncoder() train_x['Sex'] = lb_make.fit_transform(train_x['Sex']) from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier model = GaussianNB() model1 = SVC() model2 = RandomForestClassifier() model3 = GradientBoostingClassifier() model.fit(train_x, train_y) data_test['Family_Size'] = data_test['SibSp'] + data_test['Parch'] test_x = data_test[['Pclass', 'Sex', 'Family_Size']] test_x['Sex'] = lb_make.fit_transform(test_x['Sex']) test_x.sample(5) y_model = model.predict(test_x) Pa_id = data_test['PassengerId'] results = pd.DataFrame({'PassengerId': Pa_id, 'Survived': y_model}) results.sample(5)
code
2025030/cell_29
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.metrics import accuracy_score model = GaussianNB() model1 = SVC() model2 = RandomForestClassifier() model3 = GradientBoostingClassifier() models = [model, model1, model2, model3] for i in models: i.fit(xtrain, ytrain) ypred = i.predict(xtest) print(i, accuracy_score(ytest, ypred))
code
2025030/cell_11
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) sns.barplot(x='Embarked', y='Survived', hue='Sex', data=data_train)
code
2025030/cell_19
[ "image_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_x = data_train[['Pclass', 'Sex', 'Family_Size']] train_x.sample(5)
code
2025030/cell_32
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_x = data_train[['Pclass', 'Sex', 'Family_Size']] train_x.sample(5) train_y = data_train['Survived'] train_y.sample(4) from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier model = GaussianNB() model1 = SVC() model2 = RandomForestClassifier() model3 = GradientBoostingClassifier() model.fit(train_x, train_y)
code
2025030/cell_8
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) sns.barplot(x='Pclass', y='Survived', hue='Sex', data=data_train)
code
2025030/cell_16
[ "image_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5)
code
2025030/cell_17
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) sns.barplot(x='Family_Size', y='Survived', hue='Sex', data=data_train)
code
2025030/cell_35
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_x = data_train[['Pclass', 'Sex', 'Family_Size']] train_x.sample(5) from sklearn.preprocessing import LabelEncoder lb_make = LabelEncoder() train_x['Sex'] = lb_make.fit_transform(train_x['Sex']) data_test['Family_Size'] = data_test['SibSp'] + data_test['Parch'] test_x = data_test[['Pclass', 'Sex', 'Family_Size']] test_x['Sex'] = lb_make.fit_transform(test_x['Sex']) test_x.sample(5)
code
2025030/cell_24
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
code
2025030/cell_22
[ "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) data_train.sample(5) train_x = data_train[['Pclass', 'Sex', 'Family_Size']] train_x.sample(5) from sklearn.preprocessing import LabelEncoder lb_make = LabelEncoder() train_x['Sex'] = lb_make.fit_transform(train_x['Sex'])
code
2025030/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) sns.barplot(x='SibSp', y='Survived', hue='Sex', data=data_train)
code
2025030/cell_12
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.sample(5) sns.barplot(x='Age', y='Survived', hue='Sex', data=data_train)
code
73067972/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns engagement_data.rename(columns={'lp_id': 'LP ID'}, inplace=True) merged = pd.merge(engagement_data, product_data, on='LP ID') m = merged.groupby('Product Name')['engagement_index'].sum().sort_values(ascending=False).head(10) plt.figure(figsize=(15, 6)) plt.bar(m.index, m.values, color=['#6930c3', '#5e60ce', '#0096c7', '#48cae4', '#ade8f4', '#ff7f51', '#ff9b54', '#ffbf69']) plt.xlabel('Product Name') plt.xticks(rotation=90) plt.ylabel('Total page-load per 1000 students') plt.title('Top 10 products with number of page-load per 1000 students')
code
73067972/cell_9
[ "image_output_1.png" ]
from plotly.offline import plot, iplot, init_notebook_mode import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import plotly.graph_objects as go from plotly.offline import plot, iplot, init_notebook_mode init_notebook_mode(connected=True) import plotly import plotly.graph_objects as go import plotly.express as px values = [['District', 'District', 'District', 'District', 'District', 'District', 'District', 'Product', 'Product', 'Product', 'Product', 'Product', 'Product', 'Engagement', 'Engagement', 'Engagement', 'Engagement'], ['district_id', 'state', 'locale', 'pct_black/hispanic', 'pct_free/reduced', 'countyconnectionsratio', 'pptotalraw', 'LP ID', 'URL', 'Product Name', 'Provider/Company Name', 'Sector(s)', 'Primary Essential Function', 'time', 'lp_id', 'pct_access', 'engagement_index'], ['The unique identifier of the school district', 'The state where the district resides in', 'NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural.', 'Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data', 'Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data', 'ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version)', "Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district.", 'The unique identifier of the product', 'Web Link to the specific product', 'Name of the specific product', 'Name of the product provider', 'Sector of education where the product is used', 'The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: ,<b>LC = Learning & Curriculum, CM = Classroom Management<b>, and <b>SDO = School & District Operations<b>. 
Each of these categories have multiple sub-categories with which the products were labeled', 'date in YYYY-MM-DD', 'The unique identifier of the product', 'Percentage of students in the district have at least one page-load event of a given product and on a given day', 'Total page-load events per one thousand students of a given product and on a given day']] fig = go.Figure(data=[go.Table(columnorder=[1, 2, 3], columnwidth=[60, 80, 400], header=dict(values=[['<b>Dataset</b>'], ['<b>Columns Names</b>'], ['<b>Description</b>']], line_color='darkslategray', fill_color='royalblue', align=['left', 'center'], font=dict(color='white', size=15), height=40), cells=dict(values=values, line_color='darkslategray', fill=dict(color=['paleturquoise', 'white']), align=['left', 'center'], font_size=12, height=30))]) import pandas as pd district_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(district_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[district_data['district_id'], district_data['state'], district_data['locale'], district_data['pct_black/hispanic'], district_data['pct_free/reduced'], district_data['county_connections_ratio'], district_data['pp_total_raw']], fill_color=[['white', 'lavender'] * len(district_data)], align='left'))]) product_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(product_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[product_data['LP ID'], product_data['URL'], product_data['Product Name'], product_data['Provider/Company Name'], product_data['Sector(s)'], product_data['Primary Essential Function']], fill_color=[['white', 'lavender'] * len(product_data)], align='left'))]) all_csv = [] for district in district_data['district_id']: df = pd.read_csv(f'/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/{district}.csv') df.insert(0, 'district_id', district) all_csv.append(df) engagement_data = pd.concat(all_csv) district_data.dropna(subset=['state'], axis=0, inplace=True) district_data.drop(columns=['pp_total_raw'], axis=1, inplace=True) # states with number of school districts a= district_data["state"].value_counts() fig= px.scatter(a, x=a.index, y=a.values, size=a.values, color=a.index, hover_name= a.index, size_max=60, title="States with number of school districts") fig.update_layout() fig.show() locale = district_data['locale'].value_counts() fig = px.scatter(locale, x=locale.index, y=locale.values, size=locale.values, color=locale.index, hover_name=locale.index, size_max=60, title='Locale with number of school districts') fig.update_layout() fig.show()
code
73067972/cell_4
[ "text_html_output_1.png" ]
from plotly.offline import plot, iplot, init_notebook_mode import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go from plotly.offline import plot, iplot, init_notebook_mode init_notebook_mode(connected=True) import plotly import plotly.graph_objects as go import plotly.express as px values = [['District', 'District', 'District', 'District', 'District', 'District', 'District', 'Product', 'Product', 'Product', 'Product', 'Product', 'Product', 'Engagement', 'Engagement', 'Engagement', 'Engagement'], ['district_id', 'state', 'locale', 'pct_black/hispanic', 'pct_free/reduced', 'countyconnectionsratio', 'pptotalraw', 'LP ID', 'URL', 'Product Name', 'Provider/Company Name', 'Sector(s)', 'Primary Essential Function', 'time', 'lp_id', 'pct_access', 'engagement_index'], ['The unique identifier of the school district', 'The state where the district resides in', 'NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural.', 'Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data', 'Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data', 'ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version)', "Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district.", 'The unique identifier of the product', 'Web Link to the specific product', 'Name of the specific product', 'Name of the product provider', 'Sector of education where the product is used', 'The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: ,<b>LC = Learning & Curriculum, CM = Classroom Management<b>, and <b>SDO = School & District Operations<b>. 
Each of these categories have multiple sub-categories with which the products were labeled', 'date in YYYY-MM-DD', 'The unique identifier of the product', 'Percentage of students in the district have at least one page-load event of a given product and on a given day', 'Total page-load events per one thousand students of a given product and on a given day']] fig = go.Figure(data=[go.Table(columnorder=[1, 2, 3], columnwidth=[60, 80, 400], header=dict(values=[['<b>Dataset</b>'], ['<b>Columns Names</b>'], ['<b>Description</b>']], line_color='darkslategray', fill_color='royalblue', align=['left', 'center'], font=dict(color='white', size=15), height=40), cells=dict(values=values, line_color='darkslategray', fill=dict(color=['paleturquoise', 'white']), align=['left', 'center'], font_size=12, height=30))]) import pandas as pd district_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(district_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[district_data['district_id'], district_data['state'], district_data['locale'], district_data['pct_black/hispanic'], district_data['pct_free/reduced'], district_data['county_connections_ratio'], district_data['pp_total_raw']], fill_color=[['white', 'lavender'] * len(district_data)], align='left'))]) fig.show() product_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(product_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[product_data['LP ID'], product_data['URL'], product_data['Product Name'], product_data['Provider/Company Name'], product_data['Sector(s)'], product_data['Primary Essential Function']], fill_color=[['white', 'lavender'] * len(product_data)], align='left'))]) fig.show()
code
73067972/cell_2
[ "text_html_output_1.png" ]
from plotly.offline import plot, iplot, init_notebook_mode import plotly.graph_objects as go from plotly.offline import plot, iplot, init_notebook_mode init_notebook_mode(connected=True) import plotly import plotly.graph_objects as go import plotly.express as px values = [['District', 'District', 'District', 'District', 'District', 'District', 'District', 'Product', 'Product', 'Product', 'Product', 'Product', 'Product', 'Engagement', 'Engagement', 'Engagement', 'Engagement'], ['district_id', 'state', 'locale', 'pct_black/hispanic', 'pct_free/reduced', 'countyconnectionsratio', 'pptotalraw', 'LP ID', 'URL', 'Product Name', 'Provider/Company Name', 'Sector(s)', 'Primary Essential Function', 'time', 'lp_id', 'pct_access', 'engagement_index'], ['The unique identifier of the school district', 'The state where the district resides in', 'NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural.', 'Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data', 'Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data', 'ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version)', "Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district.", 'The unique identifier of the product', 'Web Link to the specific product', 'Name of the specific product', 'Name of the product provider', 'Sector of education where the product is used', 'The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: ,<b>LC = Learning & Curriculum, CM = Classroom Management<b>, and <b>SDO = School & District Operations<b>. Each of these categories have multiple sub-categories with which the products were labeled', 'date in YYYY-MM-DD', 'The unique identifier of the product', 'Percentage of students in the district have at least one page-load event of a given product and on a given day', 'Total page-load events per one thousand students of a given product and on a given day']] fig = go.Figure(data=[go.Table(columnorder=[1, 2, 3], columnwidth=[60, 80, 400], header=dict(values=[['<b>Dataset</b>'], ['<b>Columns Names</b>'], ['<b>Description</b>']], line_color='darkslategray', fill_color='royalblue', align=['left', 'center'], font=dict(color='white', size=15), height=40), cells=dict(values=values, line_color='darkslategray', fill=dict(color=['paleturquoise', 'white']), align=['left', 'center'], font_size=12, height=30))]) fig.show()
code
73067972/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from plotly.offline import plot, iplot, init_notebook_mode import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import plotly.graph_objects as go from plotly.offline import plot, iplot, init_notebook_mode init_notebook_mode(connected=True) import plotly import plotly.graph_objects as go import plotly.express as px values = [['District', 'District', 'District', 'District', 'District', 'District', 'District', 'Product', 'Product', 'Product', 'Product', 'Product', 'Product', 'Engagement', 'Engagement', 'Engagement', 'Engagement'], ['district_id', 'state', 'locale', 'pct_black/hispanic', 'pct_free/reduced', 'countyconnectionsratio', 'pptotalraw', 'LP ID', 'URL', 'Product Name', 'Provider/Company Name', 'Sector(s)', 'Primary Essential Function', 'time', 'lp_id', 'pct_access', 'engagement_index'], ['The unique identifier of the school district', 'The state where the district resides in', 'NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural.', 'Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data', 'Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data', 'ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version)', "Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district.", 'The unique identifier of the product', 'Web Link to the specific product', 'Name of the specific product', 'Name of the product provider', 'Sector of education where the product is used', 'The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: ,<b>LC = Learning & Curriculum, CM = Classroom Management<b>, and <b>SDO = School & District Operations<b>. 
Each of these categories have multiple sub-categories with which the products were labeled', 'date in YYYY-MM-DD', 'The unique identifier of the product', 'Percentage of students in the district have at least one page-load event of a given product and on a given day', 'Total page-load events per one thousand students of a given product and on a given day']] fig = go.Figure(data=[go.Table(columnorder=[1, 2, 3], columnwidth=[60, 80, 400], header=dict(values=[['<b>Dataset</b>'], ['<b>Columns Names</b>'], ['<b>Description</b>']], line_color='darkslategray', fill_color='royalblue', align=['left', 'center'], font=dict(color='white', size=15), height=40), cells=dict(values=values, line_color='darkslategray', fill=dict(color=['paleturquoise', 'white']), align=['left', 'center'], font_size=12, height=30))]) import pandas as pd district_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(district_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[district_data['district_id'], district_data['state'], district_data['locale'], district_data['pct_black/hispanic'], district_data['pct_free/reduced'], district_data['county_connections_ratio'], district_data['pp_total_raw']], fill_color=[['white', 'lavender'] * len(district_data)], align='left'))]) product_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(product_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[product_data['LP ID'], product_data['URL'], product_data['Product Name'], product_data['Provider/Company Name'], product_data['Sector(s)'], product_data['Primary Essential Function']], fill_color=[['white', 'lavender'] * len(product_data)], align='left'))]) all_csv = [] for district in district_data['district_id']: df = pd.read_csv(f'/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/{district}.csv') df.insert(0, 'district_id', district) all_csv.append(df) engagement_data = pd.concat(all_csv) district_data.dropna(subset=['state'], axis=0, inplace=True) district_data.drop(columns=['pp_total_raw'], axis=1, inplace=True) a = district_data['state'].value_counts() fig = px.scatter(a, x=a.index, y=a.values, size=a.values, color=a.index, hover_name=a.index, size_max=60, title='States with number of school districts') fig.update_layout() fig.show()
code
73067972/cell_14
[ "text_html_output_2.png", "text_html_output_1.png" ]
def custom_palette(custom_colors):
    customPalette = sns.set_palette(sns.color_palette(custom_colors))
    sns.palplot(sns.color_palette(custom_colors), size=0.8)
    plt.tick_params(axis='both', labelsize=0, length=0)
import matplotlib.pyplot as plt
import seaborn as sns
red = ['#4f000b', '#720026', '#ce4257', '#ff7f51', '#ff9b54']
bo = ['#6930c3', '#5e60ce', '#0096c7', '#48cae4', '#ade8f4', '#ff7f51', '#ff9b54', '#ffbf69']
pink = ['#aa4465', '#dd2d4a', '#f26a8d', '#f49cbb', '#ffcbf2', '#e2afff', '#ff86c8', '#ffa3a5', '#ffbf81', '#e9b827', '#f9e576']
custom_palette(pink)
code
73067972/cell_5
[ "text_html_output_1.png" ]
from plotly.offline import plot, iplot, init_notebook_mode import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go from plotly.offline import plot, iplot, init_notebook_mode init_notebook_mode(connected=True) import plotly import plotly.graph_objects as go import plotly.express as px values = [['District', 'District', 'District', 'District', 'District', 'District', 'District', 'Product', 'Product', 'Product', 'Product', 'Product', 'Product', 'Engagement', 'Engagement', 'Engagement', 'Engagement'], ['district_id', 'state', 'locale', 'pct_black/hispanic', 'pct_free/reduced', 'countyconnectionsratio', 'pptotalraw', 'LP ID', 'URL', 'Product Name', 'Provider/Company Name', 'Sector(s)', 'Primary Essential Function', 'time', 'lp_id', 'pct_access', 'engagement_index'], ['The unique identifier of the school district', 'The state where the district resides in', 'NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural.', 'Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data', 'Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data', 'ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version)', "Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district.", 'The unique identifier of the product', 'Web Link to the specific product', 'Name of the specific product', 'Name of the product provider', 'Sector of education where the product is used', 'The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: ,<b>LC = Learning & Curriculum, CM = Classroom Management<b>, and <b>SDO = School & District Operations<b>. 
Each of these categories have multiple sub-categories with which the products were labeled', 'date in YYYY-MM-DD', 'The unique identifier of the product', 'Percentage of students in the district have at least one page-load event of a given product and on a given day', 'Total page-load events per one thousand students of a given product and on a given day']] fig = go.Figure(data=[go.Table(columnorder=[1, 2, 3], columnwidth=[60, 80, 400], header=dict(values=[['<b>Dataset</b>'], ['<b>Columns Names</b>'], ['<b>Description</b>']], line_color='darkslategray', fill_color='royalblue', align=['left', 'center'], font=dict(color='white', size=15), height=40), cells=dict(values=values, line_color='darkslategray', fill=dict(color=['paleturquoise', 'white']), align=['left', 'center'], font_size=12, height=30))]) import pandas as pd district_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(district_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[district_data['district_id'], district_data['state'], district_data['locale'], district_data['pct_black/hispanic'], district_data['pct_free/reduced'], district_data['county_connections_ratio'], district_data['pp_total_raw']], fill_color=[['white', 'lavender'] * len(district_data)], align='left'))]) product_data = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv') fig = go.Figure(data=[go.Table(header=dict(values=list(product_data.columns), fill_color='paleturquoise', align='left'), cells=dict(values=[product_data['LP ID'], product_data['URL'], product_data['Product Name'], product_data['Provider/Company Name'], product_data['Sector(s)'], product_data['Primary Essential Function']], fill_color=[['white', 'lavender'] * len(product_data)], align='left'))]) all_csv = [] for district in district_data['district_id']: df = pd.read_csv(f'/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/{district}.csv') df.insert(0, 'district_id', district) all_csv.append(df) engagement_data = pd.concat(all_csv) engagement_data.head()
code
50232057/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
lebron = df.loc[df['Player'] == 'Lebron James']
lebron
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
jordan['Date'] = pd.to_datetime(jordan['Date'])
sns.boxplot(jordan['Date'].dt.year, jordan['PTS'], color='red').set_title('Jordan')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
kobe['Date'] = pd.to_datetime(kobe['Date'])
sns.boxplot(kobe['Date'].dt.year, kobe['PTS'], color='purple').set_title('Kobe')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
lebron['Date'] = pd.to_datetime(lebron['Date'])
sns.boxplot(lebron['Date'].dt.year, lebron['PTS'], color='yellow').set_title('Lebron')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.lineplot(jordan['Date'].dt.year.apply(lambda x: x - 1983), jordan['PTS'], color='red')
sns.lineplot(kobe['Date'].dt.year.apply(lambda x: x - 1995), kobe['PTS'], color='purple')
sns.lineplot(lebron['Date'].dt.year.apply(lambda x: x - 2002), lebron['PTS'], color='yellow').set_title('Jordan = Red / Kobe = Purple / Lebron = Yellow')
ax.set(xlabel='NBA Career in Years', ylabel='Points Scored per Game')
code
50232057/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
print(f"--- Jordan ---\nMin: {jordan['PTS'].min()} \nMax: {jordan['PTS'].max()} \nAvg: {round(jordan['PTS'].mean())}")
code
50232057/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
code
50232057/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
lebron = df.loc[df['Player'] == 'Lebron James']
lebron
code
50232057/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
lebron = df.loc[df['Player'] == 'Lebron James']
lebron
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
jordan['Date'] = pd.to_datetime(jordan['Date'])
sns.boxplot(jordan['Date'].dt.year, jordan['PTS'], color='red').set_title('Jordan')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
kobe['Date'] = pd.to_datetime(kobe['Date'])
sns.boxplot(kobe['Date'].dt.year, kobe['PTS'], color='purple').set_title('Kobe')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
lebron['Date'] = pd.to_datetime(lebron['Date'])
sns.boxplot(lebron['Date'].dt.year, lebron['PTS'], color='yellow').set_title('Lebron')
code
50232057/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
code
50232057/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
jordan['Date'] = pd.to_datetime(jordan['Date'])
sns.boxplot(jordan['Date'].dt.year, jordan['PTS'], color='red').set_title('Jordan')
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
kobe['Date'] = pd.to_datetime(kobe['Date'])
sns.boxplot(kobe['Date'].dt.year, kobe['PTS'], color='purple').set_title('Kobe')
code
50232057/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
lebron = df.loc[df['Player'] == 'Lebron James']
lebron
print(f"--- Lebron ---\nMin: {lebron['PTS'].min()} \nMax: {lebron['PTS'].max()} \nAvg: {round(lebron['PTS'].mean())}")
code
50232057/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
jordan['Date'] = pd.to_datetime(jordan['Date'])
sns.boxplot(jordan['Date'].dt.year, jordan['PTS'], color='red').set_title('Jordan')
code
50232057/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
jordan = df.loc[df['Player'] == 'Michael Jordan']
jordan
kobe = df.loc[df['Player'] == 'Kobe Bryant']
kobe
print(f"--- Kobe ---\nMin: {kobe['PTS'].min()} \nMax: {kobe['PTS'].max()} \nAvg: {round(kobe['PTS'].mean())}")
code
50232057/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/michael-jordan-kobe-bryant-and-lebron-james-stats/allgames_stats.csv')
df.head()
code
122260915/cell_13
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from scipy import ndimage
from skimage import color
import imageio
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
images = ['peppers.png', 'cameraman.tif', 'coins.png']
path = '/kaggle/input/lab-python/Immagini/'
I = imageio.imread(path + images[0])
if len(I.shape) == 3:
    I = color.rgb2gray(I)
(plt.subplot(1, 2, 1), plt.imshow(I), plt.title('original image'))
(plt.subplot(1, 2, 2), plt.imshow(I, cmap='gray'), plt.title('gray version'))
side_box = 3
h_box = np.ones((side_box, side_box)) / side_box ** 2
sigma = 3
side_gauss = 33
[x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
h_gauss = np.exp(-0.5 * (x ** 2 + y ** 2) / sigma ** 2) / (2 * np.pi * sigma ** 2)
h_gauss = h_gauss / np.sum(h_gauss)
temp = np.zeros((side_box, side_box))
temp[side_box // 2, side_box // 2] = 1
H_box = ndimage.uniform_filter(temp, side_box)
temp = np.zeros((side_gauss, side_gauss))
temp[side_gauss // 2, side_gauss // 2] = 1
H_gauss = ndimage.gaussian_filter(temp, sigma)
(plt.subplot(121), plt.imshow(h_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
(plt.subplot(122), plt.imshow(H_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
(plt.subplot(121), plt.imshow(h_gauss, cmap='gray'))
(plt.subplot(122), plt.imshow(H_gauss, cmap='gray'))
def box_car(side_box):
    h_box = np.ones((side_box, side_box)) / side_box ** 2
    return h_box
h_box_f = box_car(3)
def gauss(side_gauss, sigma):
    [x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
    h_gauss = np.exp(-0.5 * (x ** 2 + y ** 2) / sigma ** 2) / (2 * np.pi * sigma ** 2)
    h_gauss = h_gauss / np.sum(h_gauss)
    return h_gauss
I_box = ndimage.convolve(I, h_box)
I_gauss = ndimage.convolve(I, h_gauss)
plt.figure()
(plt.subplot(131), plt.imshow(I, cmap='gray'), plt.title('originale'))
(plt.subplot(132), plt.imshow(I_box, cmap='gray'), plt.title('Box'))
(plt.subplot(133), plt.imshow(I_gauss, cmap='gray'), plt.title('Gauss'))
I_box_nd = ndimage.uniform_filter(I, side_box)
I_gauss_nd = ndimage.gaussian_filter(I, sigma=sigma)
plt.figure()
(plt.subplot(131), plt.imshow(I, cmap='gray', vmin=0, vmax=1), plt.title('originale'))
(plt.subplot(132), plt.imshow(I_box_nd, cmap='gray', vmin=0, vmax=1), plt.title('Box'))
(plt.subplot(133), plt.imshow(I_gauss_nd, cmap='gray', vmin=0, vmax=1), plt.title('Gauss'))
code
122260915/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
from scipy import ndimage
from skimage import color
import imageio
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
images = ['peppers.png', 'cameraman.tif', 'coins.png']
path = '/kaggle/input/lab-python/Immagini/'
I = imageio.imread(path + images[0])
if len(I.shape) == 3:
    I = color.rgb2gray(I)
(plt.subplot(1, 2, 1), plt.imshow(I), plt.title('original image'))
(plt.subplot(1, 2, 2), plt.imshow(I, cmap='gray'), plt.title('gray version'))
side_box = 3
h_box = np.ones((side_box, side_box)) / side_box ** 2
sigma = 3
side_gauss = 33
[x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
h_gauss = np.exp(-0.5 * (x ** 2 + y ** 2) / sigma ** 2) / (2 * np.pi * sigma ** 2)
h_gauss = h_gauss / np.sum(h_gauss)
temp = np.zeros((side_box, side_box))
temp[side_box // 2, side_box // 2] = 1
H_box = ndimage.uniform_filter(temp, side_box)
temp = np.zeros((side_gauss, side_gauss))
temp[side_gauss // 2, side_gauss // 2] = 1
H_gauss = ndimage.gaussian_filter(temp, sigma)
plt.figure()
(plt.subplot(121), plt.imshow(h_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
(plt.subplot(122), plt.imshow(H_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
plt.figure()
(plt.subplot(121), plt.imshow(h_gauss, cmap='gray'))
(plt.subplot(122), plt.imshow(H_gauss, cmap='gray'))
code
122260915/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122260915/cell_7
[ "image_output_2.png", "image_output_1.png" ]
from skimage import color
import imageio
import matplotlib.pyplot as plt
images = ['peppers.png', 'cameraman.tif', 'coins.png']
path = '/kaggle/input/lab-python/Immagini/'
I = imageio.imread(path + images[0])
if len(I.shape) == 3:
    I = color.rgb2gray(I)
plt.figure()
(plt.subplot(1, 2, 1), plt.imshow(I), plt.title('original image'))
(plt.subplot(1, 2, 2), plt.imshow(I, cmap='gray'), plt.title('gray version'))
code
122260915/cell_15
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from scipy import ndimage
from skimage import color
import imageio
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
images = ['peppers.png', 'cameraman.tif', 'coins.png']
path = '/kaggle/input/lab-python/Immagini/'
I = imageio.imread(path + images[0])
if len(I.shape) == 3:
    I = color.rgb2gray(I)
(plt.subplot(1, 2, 1), plt.imshow(I), plt.title('original image'))
(plt.subplot(1, 2, 2), plt.imshow(I, cmap='gray'), plt.title('gray version'))
side_box = 3
h_box = np.ones((side_box, side_box)) / side_box ** 2
sigma = 3
side_gauss = 33
[x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
h_gauss = np.exp(-0.5 * (x ** 2 + y ** 2) / sigma ** 2) / (2 * np.pi * sigma ** 2)
h_gauss = h_gauss / np.sum(h_gauss)
temp = np.zeros((side_box, side_box))
temp[side_box // 2, side_box // 2] = 1
H_box = ndimage.uniform_filter(temp, side_box)
temp = np.zeros((side_gauss, side_gauss))
temp[side_gauss // 2, side_gauss // 2] = 1
H_gauss = ndimage.gaussian_filter(temp, sigma)
(plt.subplot(121), plt.imshow(h_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
(plt.subplot(122), plt.imshow(H_box, cmap='gray', vmin=-1 / side_box ** 2, vmax=2 / side_box ** 2))
(plt.subplot(121), plt.imshow(h_gauss, cmap='gray'))
(plt.subplot(122), plt.imshow(H_gauss, cmap='gray'))
def box_car(side_box):
    h_box = np.ones((side_box, side_box)) / side_box ** 2
    return h_box
h_box_f = box_car(3)
def gauss(side_gauss, sigma):
    [x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
    h_gauss = np.exp(-0.5 * (x ** 2 + y ** 2) / sigma ** 2) / (2 * np.pi * sigma ** 2)
    h_gauss = h_gauss / np.sum(h_gauss)
    return h_gauss
I_box = ndimage.convolve(I, h_box)
I_gauss = ndimage.convolve(I, h_gauss)
(plt.subplot(131), plt.imshow(I, cmap='gray'), plt.title('originale'))
(plt.subplot(132), plt.imshow(I_box, cmap='gray'), plt.title('Box'))
(plt.subplot(133), plt.imshow(I_gauss, cmap='gray'), plt.title('Gauss'))
I_box_nd = ndimage.uniform_filter(I, side_box)
I_gauss_nd = ndimage.gaussian_filter(I, sigma=sigma)
(plt.subplot(131), plt.imshow(I, cmap='gray', vmin=0, vmax=1), plt.title('originale'))
(plt.subplot(132), plt.imshow(I_box_nd, cmap='gray', vmin=0, vmax=1), plt.title('Box'))
(plt.subplot(133), plt.imshow(I_gauss_nd, cmap='gray', vmin=0, vmax=1), plt.title('Gauss'))
sides = [3, 19, 25, 33]
sigmas = [2, 7, 11, 15]
lista_box = []
lista_gauss = []
for s in sides:
    h_box = np.ones((s, s)) / s ** 2
    I_box = ndimage.convolve(I, h_box)
    lista_box.append(I_box)
side_gauss = 33
[x, y] = np.meshgrid(np.arange(-side_gauss // 2, side_gauss // 2 + 1), np.arange(-side_gauss // 2, side_gauss // 2 + 1))
for sig in sigmas:
    h_gauss = np.exp(-(x ** 2 + y ** 2) / 2 / sig ** 2) / (2 * np.pi * sig ** 2)
    h_gauss = h_gauss / np.sum(h_gauss)
    I_gauss = ndimage.convolve(I, h_gauss)
    lista_gauss.append(I_gauss)
K = len(sides)
plt.figure()
(plt.subplot(1, K + 1, 1), plt.imshow(I, cmap='gray'))
for k in range(K):
    (plt.subplot(1, K + 1, k + 2), plt.imshow(lista_box[k], cmap='gray'))
plt.figure()
(plt.subplot(1, K + 1, 1), plt.imshow(I, cmap='gray'))
for k in range(K):
    (plt.subplot(1, K + 1, k + 2), plt.imshow(lista_gauss[k], cmap='gray'))
code
1006144/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
sns.jointplot(x=df['growth_rate'], y=budget['budget'])
code
1006144/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
sns.distplot(budget['budget'])
code
1006144/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
gross = movie[['title_year', 'gross']].dropna(axis=0)
gross = gross[gross['title_year'] > 1960]
temp = pd.concat([df['growth_rate'], gross['gross']], axis=1)
temp = temp.dropna()
temp.head()
code
1006144/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gross = movie[['title_year', 'gross']].dropna(axis=0)
gross = gross[gross['title_year'] > 1960]
gross.describe()
code
1006144/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
sns.jointplot(x='title_year', y='growth_rate', data=df)
code
1006144/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
gross = movie[['title_year', 'gross']].dropna(axis=0)
gross = gross[gross['title_year'] > 1960]
temp = pd.concat([df['growth_rate'], gross['gross']], axis=1)
temp = temp.dropna()
sns.set(style='darkgrid')
sns.lmplot(x='growth_rate', y='gross', data=temp)
code
1006144/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1006144/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
movie['budget'].max()
code
1006144/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
movie.describe()
code
1006144/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
sns.distplot(gdp_stats['mean'])
code
1006144/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
gdp.describe()
code
1006144/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
sns.jointplot(x='title_year', y='budget', data=budget)
code
1006144/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
sns.jointplot(x='title_year', y='budget', data=budget)
code
1006144/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
gdp_stats = gdp.describe().T
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
gdp_stats.head()
code
1006144/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget.head()
code
1006144/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
sns.jointplot(x=df['growth_rate'], y=budget['budget'])
code
1006144/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
gdp_stats = gdp.describe().T
gdp_stats.head()
code
1006144/cell_27
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
movie = pd.read_csv('../input/movie_metadata.csv')
gdp_stats = gdp.describe().T
budget = movie[movie['budget'] < 300000000]
budget = budget[budget['budget'].isnull() == False]
budget = pd.concat([budget['title_year'], budget['budget']], axis=1)
budget = budget.dropna(axis=0)
budget = budget[budget['title_year'] > 1960]
gdp_stats = gdp_stats.reset_index()
gdp_stats = gdp_stats.dropna()
df = gdp_stats.copy()
df['index'] = df['index'].astype(str).astype(int)
df = df.rename(columns={'index': 'title_year'})
df = df.rename(columns={'mean': 'growth_rate'})
gross = movie[['title_year', 'gross']].dropna(axis=0)
gross = gross[gross['title_year'] > 1960]
temp = pd.concat([df['growth_rate'], gross['gross']], axis=1)
temp = temp.dropna()
temp['gross'] = temp['gross'] / 100000
temp.head()
code
1006144/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gdp = pd.read_csv('../input/worldGDP_growth2.csv', encoding='ISO-8859-1')
gdp
code
73083438/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
list(test.columns) == list(features.columns)
code
73083438/cell_6
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.describe()
code