Dataset schema (one Kaggle notebook cell per row):
  path              string    13 to 17 chars   (format "<kernel_id>/cell_<n>")
  screenshot_names  sequence  1 to 873 items   (screenshots of the cell's outputs)
  code              string    0 to 40.4k chars (cell source)
  cell_type         string    1 class ("code")
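Since every row follows this four-field schema, a short loading sketch may help. This is a minimal example, assuming the rows are exported as JSON Lines with exactly these fields; the filename cells.jsonl is hypothetical.

# Minimal sketch: iterate over the rows of this dump, assuming a JSON Lines export.
# The filename 'cells.jsonl' is hypothetical; adjust it to the actual export.
import json

with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        row = json.loads(line)
        # 'path' encodes the kernel id and the cell index, e.g. "128012943/cell_28"
        kernel_id, cell_name = row['path'].split('/')
        print(kernel_id, cell_name, row['cell_type'],
              len(row['code']), len(row['screenshot_names']))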
128012943/cell_28
[ "image_output_1.png" ]
x_test.shape
code
128012943/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure['sex'].value_counts()
code
128012943/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
sns.displot(data=df_insure['expenses'])
plt.show()
code
128012943/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.groupby('smoker')['bmi'].mean().plot(kind='bar')
plt.ylabel('Average BMI')
plt.show()
code
128012943/cell_35
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape

# One-hot encode the categorical columns, dropping the first level of each
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_train_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_train_new
code
128012943/cell_31
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder

x_train.shape
x_test.shape

ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
ohe.categories_
code
128012943/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
sns.displot(data=df_insure['bmi'])
plt.show()
code
128012943/cell_27
[ "image_output_1.png" ]
x_test.head()
code
128012943/cell_37
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape

ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_test_new = pd.concat([x_test, x_testfeatures.set_axis(x_test.index)], axis=1)
x_test_new
x_test_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_test_new
code
128012943/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.info()
code
128012943/cell_36
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape

ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_test_new = pd.concat([x_test, x_testfeatures.set_axis(x_test.index)], axis=1)
x_test_new
code
318069/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=False)
code
318069/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
print("Attack with most victims happened in %s on %s with %d killed, %d injuries "
      "with a total of %d victims with the following article: \n'%s' \n"
      % (most_victim.City.values[0], most_victim.index.strftime('%B %d,%Y')[0],
         most_victim.Killed, most_victim.Injured, most_victim.Victims,
         '%s' % most_victim.Description.values[0]))
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
print("Attack with the most deaths happened in %s on %s with %d killed, %d injuries "
      "with a total of %d victims with the following article: \n'%s' \n"
      % (most_killed.City.values[0], most_killed.index.strftime('%B %d,%Y')[0],
         most_killed.Killed, most_killed.Injured, most_killed.Victims,
         '%s' % most_killed.Description.values[0]))
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
print("Attack with the most injuries happened in %s on %s with %d killed, %d injuries "
      "with a total of %d victims with the following article: \n'%s' \n"
      % (most_injuries.City.values[0], most_injuries.index.strftime('%B %d,%Y')[0],
         most_injuries.Killed, most_injuries.Injured, most_injuries.Victims,
         '%s' % most_injuries.Description.values[0]))
code
318069/cell_26
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from matplotlib.pylab import rcParams
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=False)
# weekday() must be called; the bare method would group on bound-method objects
killedbyday = dfc.groupby([dfc.index.map(lambda x: x.weekday()), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbyday.unstack(level=0).plot(kind='bar', subplots=False)
killedbyday.unstack(level=1).plot(kind='bar', subplots=False)
code
318069/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
print('%s is ranked %.0f with %d attacks resulting in %d deaths and %d injuries'
      % (country, country_rank, country_attacks, country_killed, country_injured))
code
318069/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=True)
code
318069/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.City.value_counts().plot(kind='bar', figsize=(17, 7))
plt.title('Number of attacks by city')
code
318069/cell_27
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from matplotlib.pylab import rcParams
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1',
                 parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=False).plot(kind='bar', figsize=(17, 7), subplots=False)
# weekday() must be called; the bare method would group on bound-method objects
killedbyday = dfc.groupby([dfc.index.map(lambda x: x.weekday()), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbymonth = dfc.groupby([dfc.index.map(lambda x: x.month), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbymonth.unstack(level=0).plot(kind='bar', subplots=False)
killedbymonth.unstack(level=1).plot(kind='bar', subplots=False)
code
106209369/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
import lightgbm as lgbm
import numpy as np
import pandas as pd

x_train = pd.read_csv('../input/regression-datasets/X_train_reg.csv')
y_train = pd.read_csv('../input/regression-datasets/y_train_reg.csv')

def get_dataset(df, target):
    # Split df and target into 5 contiguous folds; fold i trains on the other four.
    L = df.shape[0]
    cv_test1 = df.iloc[:round(L / 5), :]
    cv_test2 = df.iloc[round(L / 5):round(2 * L / 5), :]
    cv_test3 = df.iloc[round(2 * L / 5):round(3 * L / 5), :]
    cv_test4 = df.iloc[round(3 * L / 5):round(4 * L / 5), :]
    cv_test5 = df.iloc[round(4 * L / 5):, :]
    cv_train1 = pd.concat([cv_test2, cv_test3, cv_test4, cv_test5], axis=0)
    cv_train2 = pd.concat([cv_test1, cv_test3, cv_test4, cv_test5], axis=0)
    cv_train3 = pd.concat([cv_test1, cv_test2, cv_test4, cv_test5], axis=0)
    cv_train4 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test5], axis=0)
    cv_train5 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test4], axis=0)  # folds 1-4, excluding the held-out fold 5
    T1 = target.iloc[0:round(L / 5)]
    T2 = target.iloc[round(L / 5):round(2 * L / 5)]
    T3 = target.iloc[round(2 * L / 5):round(3 * L / 5)]
    T4 = target.iloc[round(3 * L / 5):round(4 * L / 5)]
    T5 = target.iloc[round(4 * L / 5):]
    t1 = pd.concat([T2, T3, T4, T5], axis=0)
    t2 = pd.concat([T1, T3, T4, T5], axis=0)
    t3 = pd.concat([T1, T2, T4, T5], axis=0)
    t4 = pd.concat([T1, T2, T3, T5], axis=0)
    t5 = pd.concat([T1, T2, T3, T4], axis=0)
    cv_test = [cv_test1, cv_test2, cv_test3, cv_test4, cv_test5]
    cv_train = [cv_train1, cv_train2, cv_train3, cv_train4, cv_train5]
    T = [T1, T2, T3, T4, T5]
    t = [t1, t2, t3, t4, t5]
    return (cv_test, cv_train, T, t)

val = get_dataset(x_train, y_train)

def scorer(A):
    # Mean out-of-fold R^2 over the five folds
    score = 0
    for i in range(0, 5):
        model = lgbm.LGBMRegressor(n_estimators=100, random_state=42)
        model.fit(A[1][i], np.array(A[3][i]))
        pred = model.predict(A[0][i])
        S = r2_score(A[2][i], pred)  # r2_score expects (y_true, y_pred)
        score = score + S
    return score / 5

all_score = scorer(val)
all_score
code
106209369/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import lightgbm as lgbm

x_train = pd.read_csv('../input/regression-datasets/X_train_reg.csv')
y_train = pd.read_csv('../input/regression-datasets/y_train_reg.csv')
x_train.head()
code
106209369/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import lightgbm as lgbm

x_train = pd.read_csv('../input/regression-datasets/X_train_reg.csv')
y_train = pd.read_csv('../input/regression-datasets/y_train_reg.csv')
code
106209369/cell_14
[ "text_html_output_1.png" ]
from sklearn.metrics import r2_score
import lightgbm as lgbm
import numpy as np
import pandas as pd
import warnings

x_train = pd.read_csv('../input/regression-datasets/X_train_reg.csv')
y_train = pd.read_csv('../input/regression-datasets/y_train_reg.csv')

def get_dataset(df, target):
    # Split df and target into 5 contiguous folds; fold i trains on the other four.
    L = df.shape[0]
    cv_test1 = df.iloc[:round(L / 5), :]
    cv_test2 = df.iloc[round(L / 5):round(2 * L / 5), :]
    cv_test3 = df.iloc[round(2 * L / 5):round(3 * L / 5), :]
    cv_test4 = df.iloc[round(3 * L / 5):round(4 * L / 5), :]
    cv_test5 = df.iloc[round(4 * L / 5):, :]
    cv_train1 = pd.concat([cv_test2, cv_test3, cv_test4, cv_test5], axis=0)
    cv_train2 = pd.concat([cv_test1, cv_test3, cv_test4, cv_test5], axis=0)
    cv_train3 = pd.concat([cv_test1, cv_test2, cv_test4, cv_test5], axis=0)
    cv_train4 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test5], axis=0)
    cv_train5 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test4], axis=0)  # folds 1-4, excluding the held-out fold 5
    T1 = target.iloc[0:round(L / 5)]
    T2 = target.iloc[round(L / 5):round(2 * L / 5)]
    T3 = target.iloc[round(2 * L / 5):round(3 * L / 5)]
    T4 = target.iloc[round(3 * L / 5):round(4 * L / 5)]
    T5 = target.iloc[round(4 * L / 5):]
    t1 = pd.concat([T2, T3, T4, T5], axis=0)
    t2 = pd.concat([T1, T3, T4, T5], axis=0)
    t3 = pd.concat([T1, T2, T4, T5], axis=0)
    t4 = pd.concat([T1, T2, T3, T5], axis=0)
    t5 = pd.concat([T1, T2, T3, T4], axis=0)
    cv_test = [cv_test1, cv_test2, cv_test3, cv_test4, cv_test5]
    cv_train = [cv_train1, cv_train2, cv_train3, cv_train4, cv_train5]
    T = [T1, T2, T3, T4, T5]
    t = [t1, t2, t3, t4, t5]
    return (cv_test, cv_train, T, t)

val = get_dataset(x_train, y_train)

def scorer(A):
    # Mean out-of-fold R^2 over the five folds
    score = 0
    for i in range(0, 5):
        model = lgbm.LGBMRegressor(n_estimators=100, random_state=42)
        model.fit(A[1][i], np.array(A[3][i]))
        pred = model.predict(A[0][i])
        S = r2_score(A[2][i], pred)  # r2_score expects (y_true, y_pred)
        score = score + S
    return score / 5

all_score = scorer(val)
all_score

warnings.filterwarnings(action='ignore', category=UserWarning)

# Greedy backward elimination: keep dropping the column whose removal yields
# the best CV score, as long as the score does not decrease.
check = x_train
max_score = 1
all_score = scorer(val)
drop = list()
while max_score >= all_score:
    col_list = list()
    score_list = list()
    for col in check.columns:
        col_list.append(col)
        temp = check.drop(col, axis=1)
        B = get_dataset(temp, y_train)
        s = scorer(B)
        score_list.append(s)
    val_df = pd.DataFrame(list(zip(col_list, score_list)), columns=['col_list', 'score_list'])
    val_df = val_df.sort_values(by='score_list', ascending=False)
    max_col = val_df.iloc[0, 0]
    max_score = val_df.iloc[0, 1]
    if max_score >= all_score:
        drop.append(max_col)
        all_score = max_score
        check = check.drop(max_col, axis=1)

def auto_selector(x_train, y_train):
    # Same fold construction and greedy elimination, packaged as a function
    def get_dataset(df, target):
        L = df.shape[0]
        cv_test1 = df.iloc[:round(L / 5), :]
        cv_test2 = df.iloc[round(L / 5):round(2 * L / 5), :]
        cv_test3 = df.iloc[round(2 * L / 5):round(3 * L / 5), :]
        cv_test4 = df.iloc[round(3 * L / 5):round(4 * L / 5), :]
        cv_test5 = df.iloc[round(4 * L / 5):, :]
        cv_train1 = pd.concat([cv_test2, cv_test3, cv_test4, cv_test5], axis=0)
        cv_train2 = pd.concat([cv_test1, cv_test3, cv_test4, cv_test5], axis=0)
        cv_train3 = pd.concat([cv_test1, cv_test2, cv_test4, cv_test5], axis=0)
        cv_train4 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test5], axis=0)
        cv_train5 = pd.concat([cv_test1, cv_test2, cv_test3, cv_test4], axis=0)  # folds 1-4
        T1 = target.iloc[0:round(L / 5)]
        T2 = target.iloc[round(L / 5):round(2 * L / 5)]
        T3 = target.iloc[round(2 * L / 5):round(3 * L / 5)]
        T4 = target.iloc[round(3 * L / 5):round(4 * L / 5)]
        T5 = target.iloc[round(4 * L / 5):]
        t1 = pd.concat([T2, T3, T4, T5], axis=0)
        t2 = pd.concat([T1, T3, T4, T5], axis=0)
        t3 = pd.concat([T1, T2, T4, T5], axis=0)
        t4 = pd.concat([T1, T2, T3, T5], axis=0)
        t5 = pd.concat([T1, T2, T3, T4], axis=0)
        cv_test = [cv_test1, cv_test2, cv_test3, cv_test4, cv_test5]
        cv_train = [cv_train1, cv_train2, cv_train3, cv_train4, cv_train5]
        T = [T1, T2, T3, T4, T5]
        t = [t1, t2, t3, t4, t5]
        return (cv_test, cv_train, T, t)
    val = get_dataset(x_train, y_train)
    def scorer(A):
        score = 0
        for i in range(0, 5):
            model = lgbm.LGBMRegressor(n_estimators=100, random_state=42)
            model.fit(A[1][i], np.array(A[3][i]))
            pred = model.predict(A[0][i])
            S = r2_score(A[2][i], pred)
            score = score + S
        return score / 5
    warnings.filterwarnings(action='ignore', category=UserWarning)
    check = x_train
    max_score = 1
    all_score = scorer(val)
    drop = list()
    while max_score >= all_score:
        col_list = list()
        score_list = list()
        for col in check.columns:
            col_list.append(col)
            temp = check.drop(col, axis=1)
            B = get_dataset(temp, y_train)
            s = scorer(B)
            score_list.append(s)
        val_df = pd.DataFrame(list(zip(col_list, score_list)), columns=['col_list', 'score_list'])
        val_df = val_df.sort_values(by='score_list', ascending=False)
        max_col = val_df.iloc[0, 0]
        max_score = val_df.iloc[0, 1]
        if max_score >= all_score:
            drop.append(max_col)
            all_score = max_score
            check = check.drop(max_col, axis=1)
    return (x_train.drop(drop, axis=1), y_train)

final_df = auto_selector(x_train, y_train)
(final_df[0].shape, final_df[1].shape)
code
2041508/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.head()
spotify.shape
spotify.dtypes
code
2041508/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
top_globe2 = top_globe2[0:3]
top_usa2 = top_usa2[0:3]
top_great_britain2 = top_great_britain2[0:3]
top_mexico2 = top_mexico2[0:3]
top_taiwan2 = top_taiwan2[0:3]
top_singapore2 = top_singapore2[0:3]
top_all_merged2 = top_globe2.append([top_usa2, top_great_britain2, top_mexico2, top_taiwan2, top_singapore2])
code
2041508/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
import plotly.graph_objs as go
import plotly.plotly as py
from plotly.graph_objs import *

trace1 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [2.54, 1.45, 2.47, 1.75, 2.11, 2.7],
          'name': 'Shape of You', 'type': 'bar', 'uid': 'd81641', 'visible': True,
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:593809'}
trace2 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [1.48, 0, 1.4, 0, 0, 1.48],
          'name': 'Despacito - Remix', 'type': 'bar', 'uid': 'c15c84', 'visible': True,
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:724a8c'}
trace3 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 0, 0, 0, 1.97, 1.78],
          'name': 'Something Just Like This', 'type': 'bar', 'uid': '1dbc1b',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:31cdb8'}
trace4 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [1.3, 0, 0, 2.22, 0, 0],
          'name': 'Despacito (Featuring Daddy Yankee)', 'type': 'bar', 'uid': 'c6b042',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:4583e2'}
trace5 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 0, 0, 2.16, 0, 0],
          'name': 'Me Rehúso', 'type': 'bar', 'uid': 'be7d95',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:b9ea50'}
trace6 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 1.2, 0, 0, 0, 0],
          'name': 'Mask Off', 'type': 'bar', 'uid': '60d6b8',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:989da6'}
trace7 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 0, 0, 0, 1.23, 0],
          'name': '演員', 'type': 'bar', 'uid': 'f912b1',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:cd61ac'}
trace8 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 0, 1.67, 0, 0, 0],
          'name': 'Castle on the Hill', 'type': 'bar', 'uid': 'c01a7b',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:083ede'}
trace9 = {'x': ['Global', 'USA', 'Great Britain', 'Mexico', 'Taiwan', 'Singapore'],
          'y': [0, 1.51, 0, 0, 0, 0],
          'name': 'HUMBLE.', 'type': 'bar', 'uid': 'd9ea4a',
          'xsrc': 'sweetmusicality:7:ff97bb', 'ysrc': 'sweetmusicality:7:1008dc'}
data = [trace1, trace2, trace3, trace4, trace5, trace6, trace7, trace8, trace9]
layout = {'annotations': [{'x': 1.09648221896, 'y': 0.671878877124, 'font': {'size': 21},
                           'showarrow': False, 'text': '<b>Song</b>', 'xanchor': 'middle',
                           'xref': 'paper', 'yanchor': 'bottom', 'yref': 'paper'}],
          'autosize': True, 'barmode': 'stack', 'font': {'size': 18}, 'hovermode': 'closest',
          'legend': {'x': 1.01935845381, 'y': 0.673239347844, 'borderwidth': 0,
                     'orientation': 'v', 'traceorder': 'normal'},
          'margin': {'b': 80},
          'title': '<b>Top 3 Streamed Songs on Spotify from Jan 2017 - Aug 2017 by Country</b>',
          'titlefont': {'size': 28},
          'xaxis': {'autorange': False, 'domain': [0, 1.01], 'range': [-0.5, 5.51343670089],
                    'side': 'bottom', 'title': '<b>Country</b>', 'type': 'category'},
          'yaxis': {'anchor': 'x', 'autorange': False, 'domain': [-0.01, 1],
                    'range': [0, 6.66421250763],
                    'title': '<b>% this song was streamed in its country</b>', 'type': 'linear'}}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
code
2041508/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
top_globe2['prop'] = top_globe2['Streams'] / sum(top_globe2['Streams']) * 100
top_usa2['prop'] = top_usa2['Streams'] / sum(top_usa2['Streams']) * 100
top_great_britain2['prop'] = top_great_britain2['Streams'] / sum(top_great_britain2['Streams']) * 100
top_mexico2['prop'] = top_mexico2['Streams'] / sum(top_mexico2['Streams']) * 100
top_taiwan2['prop'] = top_taiwan2['Streams'] / sum(top_taiwan2['Streams']) * 100
top_singapore2['prop'] = top_singapore2['Streams'] / sum(top_singapore2['Streams']) * 100
code
2041508/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
top_globe2 = top_globe2[0:3]
top_usa2 = top_usa2[0:3]
top_great_britain2 = top_great_britain2[0:3]
top_mexico2 = top_mexico2[0:3]
top_taiwan2 = top_taiwan2[0:3]
top_singapore2 = top_singapore2[0:3]
del top_globe2['Streams']
del top_usa2['Streams']
del top_great_britain2['Streams']
del top_mexico2['Streams']
del top_taiwan2['Streams']
del top_singapore2['Streams']
code
2041508/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe = top_globe[0:3]
top_usa = top_usa[0:3]
top_great_britain = top_great_britain[0:3]
top_mexico = top_mexico[0:3]
top_taiwan = top_taiwan[0:3]
top_singapore = top_singapore[0:3]
top_all_merged = top_globe.append([top_usa, top_great_britain, top_mexico, top_taiwan, top_singapore])
top_all_merged = top_all_merged.reset_index()
all_songs = top_all_merged['Track Name'].value_counts()
all_songs = all_songs.reset_index()
len(top_all_merged['Track Name'].value_counts())
code
2041508/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
code
2041508/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe = top_globe[0:3]
top_usa = top_usa[0:3]
top_great_britain = top_great_britain[0:3]
top_mexico = top_mexico[0:3]
top_taiwan = top_taiwan[0:3]
top_singapore = top_singapore[0:3]
top_all_merged = top_globe.append([top_usa, top_great_britain, top_mexico, top_taiwan, top_singapore])
top_all_merged = top_all_merged.reset_index()
code
2041508/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe = top_globe[0:3]
top_usa = top_usa[0:3]
top_great_britain = top_great_britain[0:3]
top_mexico = top_mexico[0:3]
top_taiwan = top_taiwan[0:3]
top_singapore = top_singapore[0:3]
top_all_merged = top_globe.append([top_usa, top_great_britain, top_mexico, top_taiwan, top_singapore])
code
2041508/cell_32
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
top_globe2 = top_globe2[0:3]
top_usa2 = top_usa2[0:3]
top_great_britain2 = top_great_britain2[0:3]
top_mexico2 = top_mexico2[0:3]
top_taiwan2 = top_taiwan2[0:3]
top_singapore2 = top_singapore2[0:3]
code
2041508/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
code
2041508/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
spotify['Region'].value_counts()
len(spotify['Region'].value_counts())
code
2041508/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe = top_globe[0:3]
top_usa = top_usa[0:3]
top_great_britain = top_great_britain[0:3]
top_mexico = top_mexico[0:3]
top_taiwan = top_taiwan[0:3]
top_singapore = top_singapore[0:3]
code
2041508/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
code
2041508/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe = top_globe[0:3]
top_usa = top_usa[0:3]
top_great_britain = top_great_britain[0:3]
top_mexico = top_mexico[0:3]
top_taiwan = top_taiwan[0:3]
top_singapore = top_singapore[0:3]
del top_globe['Streams']
del top_usa['Streams']
del top_great_britain['Streams']
del top_mexico['Streams']
del top_taiwan['Streams']
del top_singapore['Streams']
code
2041508/cell_35
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe2 = globe.groupby('Artist').agg({'Streams': 'sum'})
top_globe2 = top_globe2.sort_values(['Streams'], ascending=False)
top_globe2['country'] = 'Globe'
top_usa2 = usa.groupby('Artist').agg({'Streams': 'sum'})
top_usa2 = top_usa2.sort_values(['Streams'], ascending=False)
top_usa2['country'] = 'USA'
top_great_britain2 = great_britain.groupby('Artist').agg({'Streams': 'sum'})
top_great_britain2 = top_great_britain2.sort_values(['Streams'], ascending=False)
top_great_britain2['country'] = 'Great Britain'
top_mexico2 = mexico.groupby('Artist').agg({'Streams': 'sum'})
top_mexico2 = top_mexico2.sort_values(['Streams'], ascending=False)
top_mexico2['country'] = 'Mexico'
top_taiwan2 = taiwan.groupby('Artist').agg({'Streams': 'sum'})
top_taiwan2 = top_taiwan2.sort_values(['Streams'], ascending=False)
top_taiwan2['country'] = 'Taiwan'
top_singapore2 = singapore.groupby('Artist').agg({'Streams': 'sum'})
top_singapore2 = top_singapore2.sort_values(['Streams'], ascending=False)
top_singapore2['country'] = 'Singapore'
top_globe2 = top_globe2[0:3]
top_usa2 = top_usa2[0:3]
top_great_britain2 = top_great_britain2[0:3]
top_mexico2 = top_mexico2[0:3]
top_taiwan2 = top_taiwan2[0:3]
top_singapore2 = top_singapore2[0:3]
top_all_merged2 = top_globe2.append([top_usa2, top_great_britain2, top_mexico2, top_taiwan2, top_singapore2])
top_all_merged2 = top_all_merged2.reset_index()
code
2041508/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
top_globe['prop'] = top_globe['Streams'] / sum(top_globe['Streams']) * 100
top_usa['prop'] = top_usa['Streams'] / sum(top_usa['Streams']) * 100
top_great_britain['prop'] = top_great_britain['Streams'] / sum(top_great_britain['Streams']) * 100
top_mexico['prop'] = top_mexico['Streams'] / sum(top_mexico['Streams']) * 100
top_taiwan['prop'] = top_taiwan['Streams'] / sum(top_taiwan['Streams']) * 100
top_singapore['prop'] = top_singapore['Streams'] / sum(top_singapore['Streams']) * 100
code
2041508/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
code
2041508/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

spotify = pd.read_csv('C:\\Users\\KK\\Documents\\Kitu\\College\\Senior Year\\Extracurriculars\\Python\\Spotify\\.spyproject\\data.csv')
spotify.shape
spotify.dtypes
spotify.Region = spotify.Region.astype('category')
spotify.Date = pd.to_datetime(spotify['Date'])
spotify.dtypes
globe = spotify[spotify.Region == 'global']
usa = spotify[spotify.Region == 'us']
great_britain = spotify[spotify.Region == 'gb']
mexico = spotify[spotify.Region == 'mx']
taiwan = spotify[spotify.Region == 'tw']
singapore = spotify[spotify.Region == 'sg']
top_globe = globe.groupby('Track Name').agg({'Streams': 'sum'})
top_globe = top_globe.sort_values(['Streams'], ascending=False)
top_globe['country'] = 'Globe'
top_usa = usa.groupby('Track Name').agg({'Streams': 'sum'})
top_usa = top_usa.sort_values(['Streams'], ascending=False)
top_usa['country'] = 'USA'
top_great_britain = great_britain.groupby('Track Name').agg({'Streams': 'sum'})
top_great_britain = top_great_britain.sort_values(['Streams'], ascending=False)
top_great_britain['country'] = 'Great Britain'
top_mexico = mexico.groupby('Track Name').agg({'Streams': 'sum'})
top_mexico = top_mexico.sort_values(['Streams'], ascending=False)
top_mexico['country'] = 'Mexico'
top_taiwan = taiwan.groupby('Track Name').agg({'Streams': 'sum'})
top_taiwan = top_taiwan.sort_values(['Streams'], ascending=False)
top_taiwan['country'] = 'Taiwan'
top_singapore = singapore.groupby('Track Name').agg({'Streams': 'sum'})
top_singapore = top_singapore.sort_values(['Streams'], ascending=False)
top_singapore['country'] = 'Singapore'
code
121154376/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
model = sm.OLS.from_formula('Income ~ CCAvg', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
model = sm.OLS.from_formula('Income ~ Mortgage', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
code
121154376/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.head()
code
121154376/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
model = sm.OLS.from_formula('Income ~ CCAvg', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
model = sm.OLS.from_formula('Income ~ Mortgage', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
df.rename(columns={'ID': 'ID', 'Age': 'Age', 'Experience': 'Experience', 'Income': 'Income', 'ZIP Code': 'ZIP Code', 'Family': 'Family', 'CCAvg': 'CCAvg', 'Education': 'Education', 'Mortgage': 'Mortgage', 'Personal Loan': 'PersonalLoan', 'Securities Account': 'SecuritiesAccount', 'CD Account': 'CDAccount', 'Online': 'Online', 'CreditCard': 'CreditCard'}, inplace=True)
df['CDAccount'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['PersonalLoan'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['Education'].replace({1: 'Undergrad', 2: 'Graduate', 3: 'Advanced/Professional'}, inplace=True)
df['Family'].replace({1: 'One', 2: 'Two', 3: 'Three', 4: 'Four'}, inplace=True)
categ_features = ['CDAccount', 'PersonalLoan', 'Education', 'Family']
for i in categ_features:
    x = 'Income ~' + i
    model = sm.OLS.from_formula(x, data=df)
    corr = model.fit()
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
best = -1000
for i in range(5, 600, 5):
    model = DecisionTreeRegressor(random_state=1, max_leaf_nodes=i)
    income = model.fit(trainX, trainy)
    predicted_values = income.predict(valX)
    mae = -mean_absolute_error(valy, predicted_values)
    if mae > best:
        best = mae
x = 'model = DecisionTreeRegressor(random_state = 1, max_leaf_nodes = i)\nincome = model.fit(trainX, trainy)\npredicted_values = income.predict(valX)\nmae = mean_absolute_error(valy, predicted_values)'
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
best = -1000
for i in range(5, 700, 10):
    model = RandomForestRegressor(random_state=1, max_leaf_nodes=i)
    income = model.fit(trainX, trainy)
    predicted_values = income.predict(valX)
    mae = -mean_absolute_error(valy, predicted_values)
    if mae > best:
        best = mae
print(-best)
code
121154376/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
model = sm.OLS.from_formula('Income ~ CCAvg', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
code
121154376/cell_18
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
model = sm.OLS.from_formula('Income ~ CCAvg', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
model = sm.OLS.from_formula('Income ~ Mortgage', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
df.rename(columns={'ID': 'ID', 'Age': 'Age', 'Experience': 'Experience', 'Income': 'Income', 'ZIP Code': 'ZIP Code', 'Family': 'Family', 'CCAvg': 'CCAvg', 'Education': 'Education', 'Mortgage': 'Mortgage', 'Personal Loan': 'PersonalLoan', 'Securities Account': 'SecuritiesAccount', 'CD Account': 'CDAccount', 'Online': 'Online', 'CreditCard': 'CreditCard'}, inplace=True)
df['CDAccount'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['PersonalLoan'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['Education'].replace({1: 'Undergrad', 2: 'Graduate', 3: 'Advanced/Professional'}, inplace=True)
df['Family'].replace({1: 'One', 2: 'Two', 3: 'Three', 4: 'Four'}, inplace=True)
categ_features = ['CDAccount', 'PersonalLoan', 'Education', 'Family']
for i in categ_features:
    x = 'Income ~' + i
    model = sm.OLS.from_formula(x, data=df)
    corr = model.fit()
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
best = -1000
for i in range(5, 600, 5):
    model = DecisionTreeRegressor(random_state=1, max_leaf_nodes=i)
    income = model.fit(trainX, trainy)
    predicted_values = income.predict(valX)
    mae = -mean_absolute_error(valy, predicted_values)
    if mae > best:
        best = mae
print(-best)
x = 'model = DecisionTreeRegressor(random_state = 1, max_leaf_nodes = i)\nincome = model.fit(trainX, trainy)\npredicted_values = income.predict(valX)\nmae = mean_absolute_error(valy, predicted_values)'
code
121154376/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
code
121154376/cell_12
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
model = sm.OLS.from_formula('Income ~ CCAvg', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
model = sm.OLS.from_formula('Income ~ Mortgage', data=df)
inc_ccavg = model.fit()
inc_ccavg.summary()
df.rename(columns={'ID': 'ID', 'Age': 'Age', 'Experience': 'Experience', 'Income': 'Income', 'ZIP Code': 'ZIP Code', 'Family': 'Family', 'CCAvg': 'CCAvg', 'Education': 'Education', 'Mortgage': 'Mortgage', 'Personal Loan': 'PersonalLoan', 'Securities Account': 'SecuritiesAccount', 'CD Account': 'CDAccount', 'Online': 'Online', 'CreditCard': 'CreditCard'}, inplace=True)
df['CDAccount'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['PersonalLoan'].replace({0: 'No', 1: 'Yes'}, inplace=True)
df['Education'].replace({1: 'Undergrad', 2: 'Graduate', 3: 'Advanced/Professional'}, inplace=True)
df['Family'].replace({1: 'One', 2: 'Two', 3: 'Three', 4: 'Four'}, inplace=True)
categ_features = ['CDAccount', 'PersonalLoan', 'Education', 'Family']
for i in categ_features:
    x = 'Income ~' + i
    model = sm.OLS.from_formula(x, data=df)
    corr = model.fit()
    print(corr.summary())
    print('\n\n\n\n\n')
code
121154376/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/personal-loan-modeling/Bank_Personal_Loan_Modelling.csv')
df.columns
sns.heatmap(np.round(df.corr(), 2), vmin=-1, vmax=1, annot=True, annot_kws={'fontsize': 5, 'fontweight': 'bold'})
code
128031395/cell_42
[ "text_plain_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
y_predict = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
threshold = 0.25
pred_s = rf_model.predict_proba(X_valid)
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_valid, predicted)
accuracy
code
128031395/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
import seaborn as sn
import matplotlib.pyplot as plt
hm = sn.heatmap(df.corr().round(3), cmap='YlGnBu')
plt.show()
code
128031395/cell_25
[ "image_output_1.png" ]
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i, s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
code
128031395/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df.info()
code
128031395/cell_30
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
code
128031395/cell_33
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
code
128031395/cell_44
[ "text_plain_output_1.png" ]
from eli5.sklearn import PermutationImportance
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
import eli5
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
y_predict = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
threshold = 0.25
pred_s = rf_model.predict_proba(X_valid)
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_valid, predicted)
accuracy
import eli5
from eli5.sklearn import PermutationImportance
perm = PermutationImportance(rf_model, random_state=1).fit(X_valid, y_valid)
eli5.show_weights(perm, feature_names=X_valid.columns.tolist())
code
128031395/cell_29
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i, s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
code
128031395/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
y_predict = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
code
128031395/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128031395/cell_7
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include=np.number).transpose()
missing = df.isnull().sum() * 100 / len(df)
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc, df_missing], axis=1)
code
128031395/cell_51
[ "image_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
y_predict = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
threshold = 0.25
pred_s = rf_model.predict_proba(X_valid)
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_valid, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
code
128031395/cell_28
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
y_predict = rf_model.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_predict)
code
128031395/cell_16
[ "text_plain_output_1.png" ]
from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
df.corr().round(3)
X = df.drop('Class', axis=1)
y = df['Class']
df_majority = df[df.Class == 0]
df_minority = df[df.Class == 1]
df_majority_downsampled = resample(df_majority, replace=False, n_samples=100000)
df_up_down_sampled = pd.concat([df_minority, df_majority_downsampled])
df_up_down_sampled.shape
code
128031395/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
code
128031395/cell_17
[ "text_plain_output_1.png" ]
from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
df.corr().round(3)
X = df.drop('Class', axis=1)
y = df['Class']
df_majority = df[df.Class == 0]
df_minority = df[df.Class == 1]
df_majority_downsampled = resample(df_majority, replace=False, n_samples=100000)
df_up_down_sampled = pd.concat([df_minority, df_majority_downsampled])
df_up_down_sampled.shape
df_up_down_sampled['Class'].value_counts()
code
128031395/cell_35
[ "image_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[ix], gmeans[ix]))
pyplot.plot([0, 1], [0, 1], linestyle='--', label='No Skill')
pyplot.plot(fpr, tpr, marker='.', label='Logistic')
pyplot.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best')
pyplot.xlabel('False Positive Rate')
pyplot.ylabel('True Positive Rate')
pyplot.legend()
pyplot.show()
code
128031395/cell_43
[ "image_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
import shap
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
y_predict = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
threshold = 0.25
pred_s = rf_model.predict_proba(X_valid)
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_valid, predicted)
accuracy
import shap
explainer = shap.TreeExplainer(rf_model)
shap_values = explainer.shap_values(X_valid)
shap.summary_plot(shap_values[1], X_valid)
code
128031395/cell_31
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i, s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
code
128031395/cell_24
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
code
128031395/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
df.corr().round(3)
code
128031395/cell_37
[ "image_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
df_desc = df.describe(include= np.number).transpose()
missing= ((df.isnull().sum())*100)/(len(df))
df_missing = pd.DataFrame(missing, columns=['missing%'])
pd.concat([df_desc,df_missing],axis=1)
#heatmap
import seaborn as sn
import matplotlib.pyplot as plt
hm=sn.heatmap(df.corr().round(3), cmap="YlGnBu")
plt.show()
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
#
# Print the confusion matrix using Matplotlib
#
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i,s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_true=y_test, y_pred=predicted)
fig, ax = plt.subplots(figsize=(7.5, 7.5))
ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_matrix.shape[0]):
    for j in range(conf_matrix.shape[1]):
        ax.text(x=j, y=i, s=conf_matrix[i, j], va='center', ha='center', size='xx-large')
plt.xlabel('Predictions', fontsize=18)
plt.ylabel('Actuals', fontsize=18)
plt.title('Confusion Matrix', fontsize=18)
plt.show()
code
128031395/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df.columns
df_missing = df.isnull().sum()
df_missing
code
128031395/cell_36
[ "text_plain_output_1.png" ]
from matplotlib import pyplot
from numpy import argmax
from numpy import sqrt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
pred_s = rf_model.predict_proba(X_test)
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy
threshold = 0.5
predicted = (pred_s[:, 1] >= threshold).astype('int')
threshold = 0.25
predicted = (pred_s[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred_lg = classifier.predict(X_test)
lr_probs = classifier.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from numpy import sqrt
from sklearn.metrics import roc_curve
from numpy import argmax
from matplotlib import pyplot
yhat_prob = classifier.predict_proba(X_test)
yhat = yhat_prob[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1 - fpr))
ix = argmax(gmeans)
threshold = 0.3
predicted = (yhat_prob[:, 1] >= threshold).astype('int')
accuracy = accuracy_score(y_test, predicted)
accuracy
code
128016199/cell_4
[ "text_plain_output_1.png" ]
!yolo task=detect mode=predict model=/kaggle/working//runs/detect/train/weights/best.pt conf=0.25 source=/kaggle/input/detect-pv/detect_pv/test/images save=True
code
128016199/cell_2
[ "text_plain_output_1.png" ]
!yolo task=detect mode=train model=yolov8l.pt data=/kaggle/input/datayaml/data.yaml epochs=120 plots=True
code
128016199/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
# Pip install method (recommended)
!pip install ultralytics==8.0.20
from IPython import display
display.clear_output()
import ultralytics
ultralytics.checks()
code
128016199/cell_3
[ "text_plain_output_1.png" ]
!yolo task=detect mode=val model=/kaggle/working/runs/detect/train/weights/best.pt data=/kaggle/input/datayaml/data.yaml
code
128016199/cell_5
[ "image_output_11.png", "text_plain_output_35.png", "image_output_24.png", "text_plain_output_43.png", "image_output_46.png", "text_plain_output_37.png", "image_output_25.png", "text_plain_output_5.png", "text_plain_output_48.png", "text_plain_output_30.png", "image_output_47.png", "text_plain_output_15.png", "image_output_17.png", "image_output_30.png", "text_plain_output_9.png", "text_plain_output_44.png", "image_output_14.png", "image_output_39.png", "text_plain_output_40.png", "image_output_28.png", "text_plain_output_31.png", "text_plain_output_20.png", "image_output_23.png", "image_output_34.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "text_plain_output_45.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "text_plain_output_14.png", "image_output_18.png", "text_plain_output_32.png", "text_plain_output_29.png", "image_output_21.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_24.png", "text_plain_output_21.png", "image_output_31.png", "text_plain_output_47.png", "text_plain_output_25.png", "image_output_20.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "image_output_32.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_22.png", "image_output_42.png", "image_output_35.png", "text_plain_output_38.png", "image_output_41.png", "text_plain_output_7.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "text_plain_output_16.png", "image_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "image_output_27.png", "image_output_6.png", "text_plain_output_41.png", "text_plain_output_34.png", "image_output_45.png", "text_plain_output_42.png", "text_plain_output_23.png", "image_output_12.png", "text_plain_output_28.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png", "text_plain_output_46.png" ]
from IPython import display
from IPython.display import Image, display
import glob
import glob
from IPython.display import Image, display
for image_path in glob.glob('/kaggle/working/runs/detect/predict/*.jpg')[:50]:
    display(Image(filename=image_path, width=300))
    print('\n')
code
106198653/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
space_torr = train_df.corr()
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
plt.figure(figsize=(15, 10))
sns.histplot(data=train_df, x='Age', bins=20, hue='Transported', color=sns.color_palette('flare', as_cmap=True), binwidth=1, kde=True)
plt.title('Distribution de la variable Age', fontsize=18)
plt.xlabel('Age', fontsize=18)
plt.ylabel('Nombre de passagers', fontsize=18)
code
106198653/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
space_torr = train_df.corr()
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
exp_feats = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
fig=plt.figure(figsize=(10,20))
for i, var_name in enumerate(exp_feats):
    ax=fig.add_subplot(5,2,2*i+1)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=False, hue='Transported')
    ax.set_title(var_name)
    ax=fig.add_subplot(5,2,2*i+2)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=True, hue='Transported')
    plt.ylim([0,100])
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
cat_feats = ["HomePlanet", "CryoSleep", "Destination", "VIP"]
fig = plt.figure(figsize=(10, 20))
sns.set(font_scale=1.2)
for i, var_name in enumerate(cat_feats):
    ax = fig.add_subplot(4, 1, i + 1)
    sns.countplot(data=train_df, x=var_name, axes=ax, hue="Transported")
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
train_df['Total_expense'] = train_df.iloc[:, -7:-2].sum(axis=1)
train_df['Spent_money'] = train_df['Total_expense'].apply(lambda x: False if x == 0 else True)
test_df['Total_expense'] = test_df.iloc[:, -7:-2].sum(axis=1)
test_df['Spent_money'] = test_df['Total_expense'].apply(lambda x: False if x == 0 else True)
train_df.loc[train_df['Family_size'] > 100, 'Family_size'] = np.nan
test_df.loc[test_df['Family_size'] > 100, 'Family_size'] = np.nan
fig = plt.figure(figsize=(30, 15))
sns.set(font_scale=2)
sns.countplot(data=train_df, x='Spent_money', hue='Transported')
plt.xlabel('Spent_money', fontsize=40)
plt.ylabel('Nombre de passagers', fontsize=40)
code
106198653/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno
import pandas as pd
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
import missingno as msno
msno.matrix(train_df)
msno.matrix(test_df)
code
106198653/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
!pip install miceforest
!pip install missingpy
import sklearn
from sklearn import preprocessing
code
106198653/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
space_torr = train_df.corr()
plt.figure(figsize=(12, 8))
sns.heatmap(space_torr)
code
106198653/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
space_torr = train_df.corr()
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
exp_feats = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
fig=plt.figure(figsize=(10,20))
for i, var_name in enumerate(exp_feats):
    ax=fig.add_subplot(5,2,2*i+1)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=False, hue='Transported')
    ax.set_title(var_name)
    ax=fig.add_subplot(5,2,2*i+2)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=True, hue='Transported')
    plt.ylim([0,100])
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
cat_feats = ['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
fig = plt.figure(figsize=(10, 20))
sns.set(font_scale=1.2)
for i, var_name in enumerate(cat_feats):
    ax = fig.add_subplot(4, 1, i + 1)
    sns.countplot(data=train_df, x=var_name, axes=ax, hue='Transported')
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
code
106198653/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
space_torr = train_df.corr()
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
exp_feats = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
fig=plt.figure(figsize=(10,20))
for i, var_name in enumerate(exp_feats):
    ax=fig.add_subplot(5,2,2*i+1)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=False, hue='Transported')
    ax.set_title(var_name)
    ax=fig.add_subplot(5,2,2*i+2)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=True, hue='Transported')
    plt.ylim([0,100])
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
cat_feats = ["HomePlanet", "CryoSleep", "Destination", "VIP"]
fig = plt.figure(figsize=(10, 20))
sns.set(font_scale=1.2)
for i, var_name in enumerate(cat_feats):
    ax = fig.add_subplot(4, 1, i + 1)
    sns.countplot(data=train_df, x=var_name, axes=ax, hue="Transported")
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
train_df['Total_expense'] = train_df.iloc[:, -7:-2].sum(axis=1)
train_df['Spent_money'] = train_df['Total_expense'].apply(lambda x: False if x == 0 else True)
test_df['Total_expense'] = test_df.iloc[:, -7:-2].sum(axis=1)
test_df['Spent_money'] = test_df['Total_expense'].apply(lambda x: False if x == 0 else True)
train_df['Name'].fillna('Anonymous Anonymous', inplace=True)
test_df['Name'].fillna('Anonymous Anonymous', inplace=True)
train_df['Last_name'] = train_df['Name'].apply(lambda x: x.split(' ')[-1] if type(x) == str else None)
test_df['Last_name'] = test_df['Name'].apply(lambda x: x.split(' ')[-1] if type(x) == str else None)
train_df.loc[train_df['Family_size'] > 100, 'Family_size'] = np.nan
test_df.loc[test_df['Family_size'] > 100, 'Family_size'] = np.nan
fig = plt.figure(figsize=(30, 15))
sns.set(font_scale=2)
sns.countplot(data=train_df, x="Spent_money", hue="Transported")
plt.xlabel("Spent_money", fontsize=40)
plt.ylabel("Nombre de passagers", fontsize=40)
train_df.Cabin.fillna('Z/6666/Z', inplace=True)
test_df.Cabin.fillna('Z/6666/Z', inplace=True)
train_df['Cabin_num'] = train_df['Cabin'].apply(lambda x: x.split('/')[1]).astype(int)
test_df['Cabin_num'] = test_df['Cabin'].apply(lambda x: x.split('/')[1]).astype(int)
train_df['Deck'] = train_df['Cabin'].apply(lambda x: x[0])
train_df['Side'] = train_df['Cabin'].apply(lambda x: x[-1])
test_df['Deck'] = test_df['Cabin'].apply(lambda x: x[0])
test_df['Side'] = test_df['Cabin'].apply(lambda x: x[-1])
train_df.Cabin.replace('Z/6666/Z', np.nan, inplace=True)
test_df.Cabin.replace('Z/6666/Z', np.nan, inplace=True)
train_df.Deck.replace('Z', np.nan, inplace=True)
test_df.Deck.replace('Z', np.nan, inplace=True)
train_df.Side.replace('Z', np.nan, inplace=True)
test_df.Side.replace('Z', np.nan, inplace=True)
fig = plt.figure(figsize=(15, 10))
sns.set(font_scale=1.2)
sns.histplot(data=train_df, x='Cabin_num', hue='Transported', binwidth=50)
plt.xlim([0, 2000])
plt.xlabel('Numéro de cabine', fontsize=18)
plt.ylabel('Nombre de passagers', fontsize=18)
code
106198653/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
space_torr = train_df.corr()
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
exp_feats = ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
fig = plt.figure(figsize=(10, 20))
for i, var_name in enumerate(exp_feats):
    ax = fig.add_subplot(5, 2, 2 * i + 1)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=False, hue='Transported')
    ax.set_title(var_name)
    ax = fig.add_subplot(5, 2, 2 * i + 2)
    sns.histplot(data=train_df, x=var_name, axes=ax, bins=30, kde=True, hue='Transported')
    plt.ylim([0, 100])
    ax.set_title(var_name)
fig.tight_layout()
plt.show()
code
106198653/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
space_torr = train_df.corr()
plt.figure(figsize=(12, 8))
sns.set(font_scale=1.5)
colors = sns.color_palette('Paired')
explode = (0.05, 0.05)
plt.pie(train_df.Transported.value_counts().values, labels=train_df.Transported.value_counts().index, colors=colors, explode=explode, startangle=0, shadow=True, autopct='%.1f%%')
plt.title('Distribution de la variable cible', fontsize=18)
code
106198653/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno
import pandas as pd
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
import missingno as msno
msno.matrix(train_df)
code
105178234/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.head(3)
code
105178234/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
code
105178234/cell_34
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df[df['target'] == 0][['num_char', 'NUm_words', 'Num_sentence']].describe()
code
105178234/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df['target'].value_counts()
code
105178234/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df.head()
code
105178234/cell_33
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df[['num_char', 'NUm_words', 'Num_sentence']].describe()
code
105178234/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
code
105178234/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
import matplotlib.pyplot as plt
sns.pairplot(df, hue='target')
code
105178234/cell_39
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 6))
sns.histplot(df[df['target'] == 0]['num_char'], color='green')
sns.histplot(df[df['target'] == 1]['num_char'], color='red')
code
105178234/cell_26
[ "text_plain_output_1.png" ]
import nltk
import nltk
nltk.download('punkt')
code
105178234/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.info()
code
105178234/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
code