Dataset schema (column name, feature type, min/max value length):

path              stringlengths    13 – 17
screenshot_names  sequencelengths  1 – 873
code              stringlengths    0 – 40.4k
cell_type         stringclasses    1 value
104131002/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
# num_cols is used below but its defining call was not captured in this cell; restored here
cat_cols, num_cols, cat_but_car = grab_col_names(df)
df[num_cols].describe().T
code
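The cell above defines the whole helper toolkit before using it. As a quick illustration of how grab_col_names and the IQR-based outlier helpers behave, here is a minimal sketch on a small synthetic DataFrame (the frame and its column names are invented for the example; it assumes the definitions from the cell above are in scope):

import numpy as np
import pandas as pd

# Synthetic frame: one object column, one low-cardinality integer column,
# one genuinely numeric column with an injected outlier.
toy = pd.DataFrame({
    'segment': ['a', 'b', 'a', 'c'] * 25,                    # categorical (dtype object)
    'n_children': np.random.randint(0, 4, 100),               # numeric but categorical (nunique < cat_th)
    'income': np.append(np.random.normal(50, 5, 99), 500),    # numeric, with one extreme value
})

cat_cols, num_cols, cat_but_car = grab_col_names(toy)
print(cat_cols, num_cols, cat_but_car)   # e.g. ['segment', 'n_children'] ['income'] []
print(check_outlier(toy, 'income'))      # True: 500 lies outside the 1.5 * IQR fences
replace_with_thresholds(toy, 'income')   # caps the outlier at the upper fence in place

With cat_th=10, the low-cardinality integer column is classified as categorical even though its dtype is numeric, which is exactly the "numeric-looking categorical" case the docstring describes.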
104131002/cell_3
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104131002/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
# num_cols is used below but its defining call was not captured in this cell; restored here
cat_cols, num_cols, cat_but_car = grab_col_names(df)
for col in num_cols:
    num_summary(df, col, plot=True)
code
104131002/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

# The captured cell references cat_but_car without defining it; the data load and
# grab_col_names call are restored here from the sibling cells so the cell runs.
df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
cat_cols, num_cols, cat_but_car = grab_col_names(df)
cat_but_car
code
104131002/cell_22
[ "text_plain_output_4.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
df['last_order_date'].max()
analysis_date = dt.datetime(2021, 6, 1)
# .dt.days replaces the .astype('timedelta64[D]') idiom removed in recent pandas
df['recency'] = (analysis_date - df['last_order_date']).dt.days
df['tenure'] = (df['last_order_date'] - df['first_order_date']).dt.days
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline',
               'customer_value_total_ever_offline', 'customer_value_total_ever_online',
               'recency', 'tenure']]
model_df.head()
code
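The recency and tenure features above are plain datetime arithmetic: subtracting two datetime columns yields a timedelta64 Series, and .dt.days extracts whole-day counts. A self-contained sketch with invented dates (not the FLO data):

import datetime as dt
import pandas as pd

orders = pd.DataFrame({
    'first_order_date': pd.to_datetime(['2020-01-10', '2021-03-05']),
    'last_order_date':  pd.to_datetime(['2021-05-20', '2021-05-30']),
})
analysis_date = dt.datetime(2021, 6, 1)

# Datetime subtraction produces a timedelta64 Series; .dt.days gives integer days
orders['recency'] = (analysis_date - orders['last_order_date']).dt.days            # 12, 2
orders['tenure'] = (orders['last_order_date'] - orders['first_order_date']).dt.days  # 496, 86
print(orders)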
104131002/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
check_df(df)
code
104131002/cell_27
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)

def high_correlated_cols(dataframe, plot=False, corr_th=0.9):
    # numeric_only=True skips the id/category object columns (pre-2.0 pandas default)
    corr = dataframe.corr(numeric_only=True)
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (15, 15)})
    return drop_list

high_correlated_cols(df, plot=True)
df['last_order_date'].max()
analysis_date = dt.datetime(2021, 6, 1)
# .dt.days replaces the .astype('timedelta64[D]') idiom removed in recent pandas
df['recency'] = (analysis_date - df['last_order_date']).dt.days
df['tenure'] = (df['last_order_date'] - df['first_order_date']).dt.days
# .copy() avoids SettingWithCopyWarning on the log1p assignments below
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline',
               'customer_value_total_ever_offline', 'customer_value_total_ever_online',
               'recency', 'tenure']].copy()
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)
model_df.head()
code
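The preprocessing in the cell above is a two-step squeeze: np.log1p tames the right-skewed order and monetary columns, then MinMaxScaler maps everything into [0, 1] so no single feature dominates a distance-based clustering. A minimal sketch of the same pipeline on invented data, including the round trip back to the original scale:

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

# Skewed toy feature; log1p compresses the long right tail before scaling
raw = pd.DataFrame({'value': [1.0, 2.0, 5.0, 50.0, 500.0]})
logged = np.log1p(raw)

sc = MinMaxScaler((0, 1))
scaled = pd.DataFrame(sc.fit_transform(logged), columns=raw.columns)
print(scaled)  # monotone in the original values, squeezed into [0, 1]

# Both steps are invertible, so cluster centers etc. can be mapped back:
recovered = np.expm1(sc.inverse_transform(scaled))
print(np.allclose(recovered, raw))  # True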
104131002/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    # numeric_only=True keeps this working on mixed-type frames (the pre-2.0 pandas default)
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5,
                      annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical, and categorical-but-cardinal
    variables in the dataset.
    Note: numeric-looking categorical variables are also included among the
    categorical variables.

    Parameters
    ------
        dataframe: dataframe
            The dataframe whose variable names are to be extracted
        cat_th: int, optional
            Class-count threshold for variables that are numerical but categorical
        car_th: int, optional
            Class-count threshold for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
            List of categorical variables
        num_cols: list
            List of numerical variables
        cat_but_car: list
            List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables.
        num_but_cat is contained in cat_cols.
        The three returned lists together cover every variable:
        cat_cols + num_cols + cat_but_car = number of variables.
    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns
                   if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns
                   if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquartile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquartile_range
    low_limit = quartile1 - 1.5 * interquartile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

# The captured cell references cat_cols without defining it; the data load and
# grab_col_names call are restored here from the sibling cells so the cell runs.
df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
cat_cols, num_cols, cat_but_car = grab_col_names(df)
cat_cols
code
18115772/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
df_train.drop_duplicates(keep='first', subset=['date_time'], inplace=True)
df_train.shape
df_train.drop(['date_time'], inplace=True, axis=1)
df_train.info()
code
18115772/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
df_train.describe()
code
18115772/cell_11
[ "text_html_output_1.png" ]
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import m
code
18115772/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18115772/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
df_train.drop_duplicates(keep='first', subset=['date_time'], inplace=True)
df_train.shape
df_train.drop(['date_time'], inplace=True, axis=1)
df_train.head(5)
code
18115772/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
print('The shape of the train dataset is ' + str(df_train.shape))
print('The shape of the test dataset is ' + str(df_test.shape))
code
18115772/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
df_train.drop_duplicates(keep='first', subset=['date_time'], inplace=True)
df_train.shape
df_train.drop(['date_time'], inplace=True, axis=1)
df_train.head(10)
code
18115772/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/Train.csv')
df_test = pd.read_csv('../input/Test.csv')
df_test.describe()
code
74049349/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
print(f'Train Shape: {train_df.shape}')
print(f'Test Shape : {test_df.shape}')
code
74049349/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
pd.crosstab(train_df.Sex, train_df.Survived)
code
74049349/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
num_cols
code
74049349/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
train_df.describe().transpose()
code
74049349/cell_26
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
print(f'Class: 0 = {1 - percent_survived:.2f} 1 = {percent_survived:.2f}')
code
74049349/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
train_df.head()
code
74049349/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt

plt.style.available
code
74049349/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
cat_cols
code
74049349/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
train_df.info()
code
74049349/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
test_df.info()
code
74049349/cell_38
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
pd.crosstab(train_df.Sex, train_df.Survived)
survive_percent_male = train_df[train_df.Sex == 'male'].Survived.sum() / train_df[train_df.Sex == 'male'].Survived.count()
survive_percent_female = train_df[train_df.Sex == 'female'].Survived.sum() / train_df[train_df.Sex == 'female'].Survived.count()
pd.crosstab(train_df.Pclass, train_df.Survived)
pd.crosstab(train_df.Embarked, train_df.Survived, margins=True)
code
74049349/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
# seaborn >= 0.12 requires the column as a keyword argument
sns.countplot(x='Survived', data=train_df)
code
74049349/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
features.head()
code
74049349/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
pd.crosstab(train_df.Sex, train_df.Survived)
survive_percent_male = train_df[train_df.Sex == 'male'].Survived.sum() / train_df[train_df.Sex == 'male'].Survived.count()
survive_percent_female = train_df[train_df.Sex == 'female'].Survived.sum() / train_df[train_df.Sex == 'female'].Survived.count()
pd.crosstab(train_df.Pclass, train_df.Survived)
code
74049349/cell_36
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
sample_sub = pd.read_csv('../input/titanic/gender_submission.csv')
y = train_df['Survived']
features = train_df.drop(['Survived'], axis=1)
percent_survived = train_df.Survived.sum() / train_df.Survived.count()
num_cols = train_df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = train_df.select_dtypes(include=['object', 'bool']).columns
pd.crosstab(train_df.Sex, train_df.Survived)
survive_percent_male = train_df[train_df.Sex == 'male'].Survived.sum() / train_df[train_df.Sex == 'male'].Survived.count()
survive_percent_female = train_df[train_df.Sex == 'female'].Survived.sum() / train_df[train_df.Sex == 'female'].Survived.count()
print(f'Male Survivors : {survive_percent_male:.3}')
print(f'Female Survivors: {survive_percent_female:.3}')
code
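The manual sum()/count() ratios in the last two cells are simply the group means of a 0/1 column. Assuming train_df from the cells above is loaded, the same numbers fall out of a one-line groupby or a normalized crosstab:

# The mean of a binary Survived column is exactly the survival proportion per group
print(train_df.groupby('Sex')['Survived'].mean())
# The row-normalized crosstab shows the same rates for both outcomes side by side
print(pd.crosstab(train_df.Sex, train_df.Survived, normalize='index'))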
2006313/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
predicted_home_prices = myModel.predict(X)
mean_absolute_error(y, predicted_home_prices)
code
2006313/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice

forest_model = RandomForestRegressor()
# train_X / train_y / val_X are produced by the train_test_split in 2006313/cell_14
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing

def score_dataset(Xtrain, Xtest, ytrain, ytest):
    forest_model = RandomForestRegressor()
    forest_model.fit(Xtrain, ytrain)
    preds = forest_model.predict(Xtest)
    return mean_absolute_error(ytest, preds)

# X_train / X_test / y_train / y_test come from a split cell that was not captured;
# a plausible reconstruction (assumption, based on the variables in 2006313/cell_29) would be:
# X_train, X_test, y_train, y_test = train_test_split(
#     numeric_predictors.drop(['SalePrice'], axis=1), target, random_state=0)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_test = X_test.drop(cols_with_missing, axis=1)
print('Mean Absolute Error from dropping columns with Missing Values:')
print(score_dataset(reduced_X_train, reduced_X_test, y_train, y_test))
code
2006313/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
print(price.head())
code
2006313/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing
code
2006313/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
two_columns_of_data.describe()
code
2006313/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing
df = pd.read_csv(main_file_path)
target = df.SalePrice
predictors = df.drop(['SalePrice'], axis=1)
numeric_predictors = df.select_dtypes(exclude=['object'])
predictors.dtypes.sample(10)
code
2006313/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
print(df.describe())
code
2006313/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.tree import DecisionTreeRegressor

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
print('Making predictions for the following 5 houses:')
print(X.head())
print('The predictions are')
print(myModel.predict(X.head()))
code
2006313/cell_19
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

# train_X / train_y / val_X / val_y come from the train_test_split in 2006313/cell_14
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, preds))
code
2006313/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer is the drop-in replacement used below
from sklearn.impute import SimpleImputer

main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice

forest_model = RandomForestRegressor()
# train_X / train_y / val_X are produced by the train_test_split in 2006313/cell_14
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing

def score_dataset(Xtrain, Xtest, ytrain, ytest):
    forest_model = RandomForestRegressor()
    forest_model.fit(Xtrain, ytrain)
    preds = forest_model.predict(Xtest)
    return mean_absolute_error(ytest, preds)

# X_train / X_test / y_train / y_test come from a split cell that was not captured
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_test = X_test.drop(cols_with_missing, axis=1)
my_imputer = SimpleImputer()
imputed_X_train = my_imputer.fit_transform(X_train)
imputed_X_test = my_imputer.transform(X_test)
imputed_X_train_plus = X_train.copy()
imputed_X_test_plus = X_test.copy()
cols_with_missing = (col for col in X_train.columns if X_train[col].isnull().any())
for col in cols_with_missing:
    imputed_X_train_plus[col + '_was_missing'] = imputed_X_train_plus[col].isnull()
    imputed_X_test_plus[col + '_was_missing'] = imputed_X_test_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = my_imputer.fit_transform(imputed_X_train_plus)
imputed_X_test_plus = my_imputer.transform(imputed_X_test_plus)
print('Mean Absolute Error from Imputation while Tracking What Was Imputed:')
print(score_dataset(imputed_X_train_plus, imputed_X_test_plus, y_train, y_test))
code
2006313/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
print(df.columns)
code
2006313/cell_17
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

for max_leaf_nodes in [5, 50, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
    print('Max leaf nodes: %d \t\t Mean Absolute Error: %d' % (max_leaf_nodes, my_mae))
code
2006313/cell_14
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
from sklearn.tree import DecisionTreeRegressor
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_home_prices = myModel.predict(X)
mean_absolute_error(y, predicted_home_prices)
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
myModel = DecisionTreeRegressor()
myModel.fit(train_X, train_y)
val_predictions = myModel.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))
code
2006313/cell_10
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
from sklearn.tree import DecisionTreeRegressor
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
code
2006313/cell_27
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import Imputer
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing

def score_dataset(Xtrain, Xtest, ytrain, ytest):
    forest_model = RandomForestRegressor()
    forest_model.fit(Xtrain, ytrain)
    preds = forest_model.predict(Xtest)
    return mean_absolute_error(ytest, preds)

cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_test = X_test.drop(cols_with_missing, axis=1)
from sklearn.preprocessing import Imputer
my_imputer = Imputer()
imputed_X_train = my_imputer.fit_transform(X_train)
imputed_X_test = my_imputer.transform(X_test)
print('Mean Absolute Error from Imputation:')
print(score_dataset(imputed_X_train, imputed_X_test, y_train, y_test))
code
2006313/cell_37
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
from sklearn.tree import DecisionTreeRegressor
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing

def score_dataset(Xtrain, Xtest, ytrain, ytest):
    forest_model = RandomForestRegressor()
    forest_model.fit(Xtrain, ytrain)
    preds = forest_model.predict(Xtest)
    return mean_absolute_error(ytest, preds)

df = pd.read_csv(main_file_path)
target = df.SalePrice
predictors = df.drop(['SalePrice'], axis=1)
numeric_predictors = df.select_dtypes(exclude=['object'])
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_test = X_test.drop(cols_with_missing, axis=1)
from sklearn.preprocessing import Imputer
my_imputer = Imputer()
imputed_X_train = my_imputer.fit_transform(X_train)
imputed_X_test = my_imputer.transform(X_test)
imputed_X_train_plus = X_train.copy()
imputed_X_test_plus = X_test.copy()
cols_with_missing = (col for col in X_train.columns if X_train[col].isnull().any())
for col in cols_with_missing:
    imputed_X_train_plus[col + '_was_missing'] = imputed_X_train_plus[col].isnull()
    imputed_X_test_plus[col + '_was_missing'] = imputed_X_test_plus[col].isnull()
my_imputer = Imputer()
imputed_X_train_plus = my_imputer.fit_transform(imputed_X_train_plus)
imputed_X_test_plus = my_imputer.transform(imputed_X_test_plus)
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
my_pipeline = make_pipeline(Imputer(), RandomForestRegressor())
my_pipeline.fit(X_train, y_train)
predictions = my_pipeline.predict(X_test)
my_imputer = Imputer()
my_model = RandomForestRegressor()
imputed_train_X = my_imputer.fit_transform(X_train)
imputed_test_X = my_imputer.transform(X_test)
my_model.fit(imputed_train_X, y_train)
predictions = my_model.predict(imputed_test_X)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data = pd.read_csv('../input/train.csv')
data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_X, train_y, verbose=False)
predictions = my_model.predict(test_X)
from sklearn.metrics import mean_absolute_error
my_model = XGBRegressor(n_estimators=1000)
my_model.fit(train_X, train_y, early_stopping_rounds=5, eval_set=[(test_X, test_y)], verbose=False)
predictions = my_model.predict(test_X)
from sklearn.metrics import mean_absolute_error
print('Mean Absolute Error : ' + str(mean_absolute_error(predictions, test_y)))
code
2006313/cell_36
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Imputer
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
df = pd.read_csv(main_file_path)
price = df.SalePrice
columns_of_interest = ['HouseStyle', 'SaleCondition']
two_columns_of_data = df[columns_of_interest]
y = price
columns_of_interest = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = df[columns_of_interest]
from sklearn.tree import DecisionTreeRegressor
myModel = DecisionTreeRegressor()
myModel.fit(X, y)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
preds = forest_model.predict(val_X)
cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
cols_with_missing

def score_dataset(Xtrain, Xtest, ytrain, ytest):
    forest_model = RandomForestRegressor()
    forest_model.fit(Xtrain, ytrain)
    preds = forest_model.predict(Xtest)
    return mean_absolute_error(ytest, preds)

df = pd.read_csv(main_file_path)
target = df.SalePrice
predictors = df.drop(['SalePrice'], axis=1)
numeric_predictors = df.select_dtypes(exclude=['object'])
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_test = X_test.drop(cols_with_missing, axis=1)
from sklearn.preprocessing import Imputer
my_imputer = Imputer()
imputed_X_train = my_imputer.fit_transform(X_train)
imputed_X_test = my_imputer.transform(X_test)
imputed_X_train_plus = X_train.copy()
imputed_X_test_plus = X_test.copy()
cols_with_missing = (col for col in X_train.columns if X_train[col].isnull().any())
for col in cols_with_missing:
    imputed_X_train_plus[col + '_was_missing'] = imputed_X_train_plus[col].isnull()
    imputed_X_test_plus[col + '_was_missing'] = imputed_X_test_plus[col].isnull()
my_imputer = Imputer()
imputed_X_train_plus = my_imputer.fit_transform(imputed_X_train_plus)
imputed_X_test_plus = my_imputer.transform(imputed_X_test_plus)
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
my_pipeline = make_pipeline(Imputer(), RandomForestRegressor())
my_pipeline.fit(X_train, y_train)
predictions = my_pipeline.predict(X_test)
my_imputer = Imputer()
my_model = RandomForestRegressor()
imputed_train_X = my_imputer.fit_transform(X_train)
imputed_test_X = my_imputer.transform(X_test)
my_model.fit(imputed_train_X, y_train)
predictions = my_model.predict(imputed_test_X)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
data = pd.read_csv('../input/train.csv')
data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_y, test_y = train_test_split(X.as_matrix(), y.as_matrix(), test_size=0.25)
my_imputer = Imputer()
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_X, train_y, verbose=False)
predictions = my_model.predict(test_X)
from sklearn.metrics import mean_absolute_error
print('Mean Absolute Error : ' + str(mean_absolute_error(predictions, test_y)))
code
128018979/cell_4
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
x = np.random.rand(100, 1)
y = 2 + 3 * x + np.random.rand(100, 1)
model = LinearRegression()
model.fit(x, y)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(123)
x = np.linspace(0, 10, 20)
y = 3 * x + 2 + np.random.normal(scale=2, size=len(x))
model = LinearRegression()
model.fit(x.reshape(-1, 1), y)
y_pred = model.predict(x.reshape(-1, 1))
residuals = y - y_pred
SSR = np.sum(residuals ** 2)
print('Sum of squared residuals (SSR): {:.2f}'.format(SSR))
code
128018979/cell_2
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
x = np.random.rand(100, 1)
y = 2 + 3 * x + np.random.rand(100, 1)
model = LinearRegression()
model.fit(x, y)
plt.scatter(x, y, s=10)
plt.plot(x, model.predict(x), color='r')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
code
128018979/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
x = np.random.rand(100, 1)
y = 2 + 3 * x + np.random.rand(100, 1)
model = LinearRegression()
model.fit(x, y)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(123)
x = np.linspace(0, 10, 20)
y = 3 * x + 2 + np.random.normal(scale=2, size=len(x))
model = LinearRegression()
model.fit(x.reshape(-1, 1), y)
y_pred = model.predict(x.reshape(-1, 1))
residuals = y - y_pred
plt.scatter(x, y, s=10)
plt.plot(x, y_pred, color='red')
plt.legend(['Regression line', 'Data'])
plt.xlabel('X')
plt.ylabel('Y')
print('Residuals:')
for i in range(10):
    print('Data point {}: {:.2f}'.format(i + 1, residuals[i]))
code
128018979/cell_5
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
x = np.random.rand(100, 1)
y = 2 + 3 * x + np.random.rand(100, 1)
model = LinearRegression()
model.fit(x, y)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(123)
x = np.linspace(0, 10, 20)
y = 3 * x + 2 + np.random.normal(scale=2, size=len(x))
model = LinearRegression()
model.fit(x.reshape(-1, 1), y)
y_pred = model.predict(x.reshape(-1, 1))
residuals = y - y_pred
SSR = np.sum(residuals ** 2)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
x = np.random.rand(100, 1)
y = 2 + 3 * x + np.random.rand(100, 1)
model = LinearRegression()
model.fit(x, y)
sse = np.sum((y - model.predict(x)) ** 2)
residuals = y - model.predict(x)
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
axs[0, 0].scatter(x, y, s=10)
axs[0, 0].plot(x, model.predict(x), color='r')
axs[0, 0].set_xlabel('X')
axs[0, 0].set_ylabel('Y')
axs[0, 0].set_title('Original Regression Line')
model_high_slope = LinearRegression()
model_high_slope.fit(x, y + 0.5)
sse_high_slope = np.sum((y - model_high_slope.predict(x)) ** 2)
residuals_high_slope = y - model_high_slope.predict(x)
axs[0, 1].scatter(x, y, s=10)
axs[0, 1].plot(x, model_high_slope.predict(x), color='r')
axs[0, 1].set_xlabel('X')
axs[0, 1].set_ylabel('Y')
axs[0, 1].set_title('Line with Higher Slope')
model_low_slope = LinearRegression()
model_low_slope.fit(x, y - 0.5)
sse_low_slope = np.sum((y - model_low_slope.predict(x)) ** 2)
residuals_low_slope = y - model_low_slope.predict(x)
axs[1, 0].scatter(x, y, s=10)
axs[1, 0].plot(x, model_low_slope.predict(x), color='r')
axs[1, 0].set_xlabel('X')
axs[1, 0].set_ylabel('Y')
axs[1, 0].set_title('Line with Lower Slope')
model_diff_intercept = LinearRegression()
model_diff_intercept.fit(x, y + 1)
sse_diff_intercept = np.sum((y - model_diff_intercept.predict(x)) ** 2)
residuals_diff_intercept = y - model_diff_intercept.predict(x)
axs[1, 1].scatter(x, y, s=10)
axs[1, 1].plot(x, model_diff_intercept.predict(x), color='r')
axs[1, 1].set_xlabel('X')
axs[1, 1].set_ylabel('Y')
axs[1, 1].set_title('Line with Different Intercept')
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
axs[0, 0].scatter(model.predict(x), residuals, s=10)
axs[0, 0].set_xlabel('Y Predicted')
axs[0, 0].set_ylabel('Residuals')
axs[0, 0].set_title('Residuals for Original Line')
axs[0, 1].scatter(model_high_slope.predict(x), residuals_high_slope, s=10)
axs[0, 1].set_xlabel('Y Predicted')
axs[0, 1].set_ylabel('Residuals')
axs[0, 1].set_title('Residuals for Line with Higher Slope')
axs[1, 0].scatter(model_low_slope.predict(x), residuals_low_slope, s=10)
axs[1, 0].set_xlabel('Y Predicted')
axs[1, 0].set_ylabel('Residuals')
axs[1, 0].set_title('Residuals for Line with Lower Slope')
axs[1, 1].scatter(model_diff_intercept.predict(x), residuals_diff_intercept, s=10)
axs[1, 1].set_xlabel('Y Predicted')
axs[1, 1].set_ylabel('Residuals')
axs[1, 1].set_title('Residuals for Line with Different Intercept')
plt.tight_layout()
plt.show()
code
128048760/cell_9
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
print(tokens)
code
128048760/cell_4
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
print(tokenized_text)
code
128048760/cell_6
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
print(tokenized_text)
code
128048760/cell_11
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.convert_ids_to_tokens(token_ids))
code
128048760/cell_19
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
import torch
import torch
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_ids = tokenizer.encode(text)
tokens_from_encode = tokenizer.convert_ids_to_tokens(token_ids)
example_word = 'cyber'
example_token_id = tokenizer.convert_tokens_to_ids([example_word])[0]
example_embedding = model.embeddings.word_embeddings(torch.tensor([example_token_id]))
word_one = 'king'
word_two = 'queen'
one_token_id = tokenizer.convert_tokens_to_ids([word_one])[0]
one_embedding = model.embeddings.word_embeddings(torch.tensor([one_token_id]))
two_token_id = tokenizer.convert_tokens_to_ids([word_two])[0]
two_embedding = model.embeddings.word_embeddings(torch.tensor([two_token_id]))
cos = torch.nn.CosineSimilarity(dim=1)
cosine_similarity = cos(one_embedding, two_embedding)
euclidean_distance = torch.cdist(one_embedding, two_embedding)
word_three = 'man'
word_four = 'woman'
three_token_id = tokenizer.convert_tokens_to_ids([word_three])[0]
three_embedding = model.embeddings.word_embeddings(torch.tensor([three_token_id]))
four_token_id = tokenizer.convert_tokens_to_ids([word_four])[0]
four_embedding = model.embeddings.word_embeddings(torch.tensor([four_token_id]))
cos = torch.nn.CosineSimilarity(dim=1)
cosine_similarity = cos(three_embedding, four_embedding)
euclidean_distance = torch.cdist(three_embedding, four_embedding)
print(f"Cosine Similarity between '{word_three}' and '{word_four}': {cosine_similarity[0]}")
print(f"Euclidean Distance between '{word_three}' and '{word_four}': {euclidean_distance[0][0]}")
code
128048760/cell_18
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
import torch
import torch
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_ids = tokenizer.encode(text)
tokens_from_encode = tokenizer.convert_ids_to_tokens(token_ids)
example_word = 'cyber'
example_token_id = tokenizer.convert_tokens_to_ids([example_word])[0]
example_embedding = model.embeddings.word_embeddings(torch.tensor([example_token_id]))
word_one = 'king'
word_two = 'queen'
one_token_id = tokenizer.convert_tokens_to_ids([word_one])[0]
one_embedding = model.embeddings.word_embeddings(torch.tensor([one_token_id]))
two_token_id = tokenizer.convert_tokens_to_ids([word_two])[0]
two_embedding = model.embeddings.word_embeddings(torch.tensor([two_token_id]))
cos = torch.nn.CosineSimilarity(dim=1)
cosine_similarity = cos(one_embedding, two_embedding)
euclidean_distance = torch.cdist(one_embedding, two_embedding)
print(f"Cosine Similarity between '{word_one}' and '{word_two}': {cosine_similarity[0]}")
print(f"Euclidean Distance between '{word_one}' and '{word_two}': {euclidean_distance[0][0]}")
code
128048760/cell_15
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
import torch
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_ids = tokenizer.encode(text)
tokens_from_encode = tokenizer.convert_ids_to_tokens(token_ids)
example_word = 'cyber'
example_token_id = tokenizer.convert_tokens_to_ids([example_word])[0]
example_embedding = model.embeddings.word_embeddings(torch.tensor([example_token_id]))
print(example_embedding.shape)
print(example_embedding)
code
128048760/cell_3
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
code
128048760/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(token_ids)
code
128048760/cell_12
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
text = 'Hello World!'
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = 'i love cryptography, mathematics, and cybersecurity.'
tokenized_text = tokenizer.tokenize(text)
text = 'I love cryptography, mathematics, and cybersecurity.'
tokens = tokenizer.tokenize(text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_ids = tokenizer.encode(text)
print(token_ids)
tokens_from_encode = tokenizer.convert_ids_to_tokens(token_ids)
print(tokens_from_encode)
code
128048760/cell_5
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, GPT2Model
from transformers import AutoTokenizer, BertModel
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
from transformers import AutoTokenizer, GPT2Model
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
code
89141968/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?', 'Is your sadness momentarily or has it been constant for a long time?', 'At what time of the day are you extremely low?', 'How frequently have you had little pleasure or interest in the activities you usually enjoy?', 'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.', 'How frequently have you been doing things that mean something to you or your life?', 'How easy is it for you to take medical leave for a mental health condition?', 'How often do you make use of substance abuse(e.g. smoking, alcohol)?', 'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?', 'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping', '(If sad)have you been in the same mental state for the past few days?', 'Has there been a sudden and huge change in your life?', 'Your stress is related to which of the following areas?', 'If you have a mental health condition, do you feel that it interferes with your work?', 'Have you taken any therapy or medication in the near past for mental health?', 'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?', 'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?', 'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'], ['Not sad', 'For some time', 'Significant time', 'Long time'], ['Morning', 'Afternoon', 'Evening'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'], ['Very Often', 'Often', 'Sometimes', 'Never'], ['Very easy', 'Easy', 'Not so easy', 'Difficult'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'], ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data
code
89141968/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How likely do you feel yourself vulnerable or lonely?'].mode()
code
89141968/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How often do you get offended or angry or start crying ?'].mode()
code
89141968/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How comfortable are you in talking about your mental health?'].mode()
code
89141968/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['Have you taken any therapy or medication in the near past for mental health?'].mode()
code
89141968/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data
code
89141968/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
code
89141968/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?'].mode()
code
89141968/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
code
89141968/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
code
89141968/cell_48
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?', 'Is your sadness momentarily or has it been constant for a long time?', 'At what time of the day are you extremely low?', 'How frequently have you had little pleasure or interest in the activities you usually enjoy?', 'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.', 'How frequently have you been doing things that mean something to you or your life?', 'How easy is it for you to take medical leave for a mental health condition?', 'How often do you make use of substance abuse(e.g. smoking, alcohol)?', 'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?', 'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping', '(If sad)have you been in the same mental state for the past few days?', 'Has there been a sudden and huge change in your life?', 'Your stress is related to which of the following areas?', 'If you have a mental health condition, do you feel that it interferes with your work?', 'Have you taken any therapy or medication in the near past for mental health?', 'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?', 'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?', 'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'], ['Not sad', 'For some time', 'Significant time', 'Long time'], ['Morning', 'Afternoon', 'Evening'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'], ['Very Often', 'Often', 'Sometimes', 'Never'], ['Very easy', 'Easy', 'Not so easy', 'Difficult'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'], ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data.select_dtypes('object')
data = binary_encode(data, columns=['Are you above 30 years of age?'], positive_values=['Yes'])
data = binary_encode(data, columns=['Prediction_status'], positive_values=['True'])
data.select_dtypes('object')
print('Remaining non-numeric columns:', len(data.select_dtypes('object').columns))
code
89141968/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How likely do you feel yourself vulnerable or lonely?'].mode()
code
89141968/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?'].mode()
code
89141968/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
code
89141968/cell_49
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?', 'Is your sadness momentarily or has it been constant for a long time?', 'At what time of the day are you extremely low?', 'How frequently have you had little pleasure or interest in the activities you usually enjoy?', 'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.', 'How frequently have you been doing things that mean something to you or your life?', 'How easy is it for you to take medical leave for a mental health condition?', 'How often do you make use of substance abuse(e.g. smoking, alcohol)?', 'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?', 'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping', '(If sad)have you been in the same mental state for the past few days?', 'Has there been a sudden and huge change in your life?', 'Your stress is related to which of the following areas?', 'If you have a mental health condition, do you feel that it interferes with your work?', 'Have you taken any therapy or medication in the near past for mental health?', 'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?', 'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?', 'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'], ['Not sad', 'For some time', 'Significant time', 'Long time'], ['Morning', 'Afternoon', 'Evening'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'], ['Very Often', 'Often', 'Sometimes', 'Never'], ['Very easy', 'Easy', 'Not so easy', 'Difficult'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'], ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data.select_dtypes('object')
data = binary_encode(data, columns=['Are you above 30 years of age?'], positive_values=['Yes'])
data = binary_encode(data, columns=['Prediction_status'], positive_values=['True'])
data.select_dtypes('object')
data.tail()
code
89141968/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
code
89141968/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['How comfortable are you in talking about your mental health?'].unique()
code
89141968/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['Has the COVID-19 pandemic affected your mental well being?'].mode()
code
89141968/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?', 'Is your sadness momentarily or has it been constant for a long time?', 'At what time of the day are you extremely low?', 'How frequently have you had little pleasure or interest in the activities you usually enjoy?', 'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.', 'How frequently have you been doing things that mean something to you or your life?', 'How easy is it for you to take medical leave for a mental health condition?', 'How often do you make use of substance abuse(e.g. smoking, alcohol)?', 'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?', 'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping', '(If sad)have you been in the same mental state for the past few days?', 'Has there been a sudden and huge change in your life?', 'Your stress is related to which of the following areas?', 'If you have a mental health condition, do you feel that it interferes with your work?', 'Have you taken any therapy or medication in the near past for mental health?', 'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?', 'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?', 'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'], ['Not sad', 'For some time', 'Significant time', 'Long time'], ['Morning', 'Afternoon', 'Evening'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'], ['Very Often', 'Often', 'Sometimes', 'Never'], ['Very easy', 'Easy', 'Not so easy', 'Difficult'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'], ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data.select_dtypes('object')
data = binary_encode(data, columns=['Are you above 30 years of age?'], positive_values=['Yes'])
data = binary_encode(data, columns=['Prediction_status'], positive_values=['True'])
data.select_dtypes('object')
code
89141968/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['If sad, how likely are you to take an appointment with a psychologist or a counsellor for your current mental state?'].mode()
code
89141968/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?', 'Is your sadness momentarily or has it been constant for a long time?', 'At what time of the day are you extremely low?', 'How frequently have you had little pleasure or interest in the activities you usually enjoy?', 'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.', 'How frequently have you been doing things that mean something to you or your life?', 'How easy is it for you to take medical leave for a mental health condition?', 'How often do you make use of substance abuse(e.g. smoking, alcohol)?', 'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?', 'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping', '(If sad)have you been in the same mental state for the past few days?', 'Has there been a sudden and huge change in your life?', 'Your stress is related to which of the following areas?', 'If you have a mental health condition, do you feel that it interferes with your work?', 'Have you taken any therapy or medication in the near past for mental health?', 'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?', 'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?', 'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'], ['Not sad', 'For some time', 'Significant time', 'Long time'], ['Morning', 'Afternoon', 'Evening'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'], ['Very Often', 'Often', 'Sometimes', 'Never'], ['Very easy', 'Easy', 'Not so easy', 'Difficult'], ['Never', 'Sometimes', 'Often', 'Very Often'], ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'], ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data.select_dtypes('object')
code
89141968/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
code
89141968/cell_53
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data = data.drop(['Timestamp', 'Email address', 'Name', 'Employment Status', 'Prediction'], axis=1)
data = data.drop(['City'], axis=1)
{column: len(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: list(data[column].unique()) for column in data.select_dtypes('object').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('float64').columns}
{column: len(data[column].unique()) for column in data.select_dtypes('int64').columns}
target = 'Prediction_status'
ordinal_features = ['How are you feeling today?',
                    'Is your sadness momentarily or has it been constant for a long time?',
                    'At what time of the day are you extremely low?',
                    'How frequently have you had little pleasure or interest in the activities you usually enjoy?',
                    'Describe how ‘supported’ you feel by others around you – your friends, family, or otherwise.',
                    'How frequently have you been doing things that mean something to you or your life?',
                    'How easy is it for you to take medical leave for a mental health condition?',
                    'How often do you make use of substance abuse(e.g. smoking, alcohol)?',
                    'How many hours do you spend per day on watching mobile phone, laptop, computer, television, etc.?',
                    'How often do you get offended or angry or start crying ?']
nominal_features = ['eating and sleeping',
                    '(If sad)have you been in the same mental state for the past few days?',
                    'Has there been a sudden and huge change in your life?',
                    'Your stress is related to which of the following areas?',
                    'If you have a mental health condition, do you feel that it interferes with your work?',
                    'Have you taken any therapy or medication in the near past for mental health?',
                    'Having trouble concentrating on things, such as reading the newspaper or watching television, or studying?',
                    'Do you feel bad about yourself — or that you are a failure or have let yourself or your family down?',
                    'Has the COVID-19 pandemic affected your mental well being?']

def binary_encode(df, columns, positive_values):
    # map each column to 1 where it equals its positive value, else 0
    df = df.copy()
    for column, positive_value in zip(columns, positive_values):
        df[column] = df[column].apply(lambda x: 1 if x == positive_value else 0)
    return df

def ordinal_encode(df, columns, orderings):
    # replace each category with its rank in the supplied ordering
    df = df.copy()
    for column, ordering in zip(columns, orderings):
        df[column] = df[column].apply(lambda x: ordering.index(x))
    return df

def nominal_encode(df, columns, prefixes):
    # one-hot encode each column with the given prefix and drop the original
    df = df.copy()
    for column, prefix in zip(columns, prefixes):
        dummies = pd.get_dummies(df[column], prefix)
        df = pd.concat([df, dummies], axis=1)
        df = df.drop(column, axis=1)
    return df

ordinal_orderings = [['Good', 'Fine', 'Sad', 'Depressed'],
                     ['Not sad', 'For some time', 'Significant time', 'Long time'],
                     ['Morning', 'Afternoon', 'Evening'],
                     ['Never', 'Sometimes', 'Often', 'Very Often'],
                     ['Highly supportive', 'Satisfactory', 'Little bit', 'Not at all'],
                     ['Very Often', 'Often', 'Sometimes', 'Never'],
                     ['Very easy', 'Easy', 'Not so easy', 'Difficult'],
                     ['Never', 'Sometimes', 'Often', 'Very Often'],
                     ['1-2 hours', '2-5 hours', '5-10 hours', 'More than 10 hours'],
                     ['Never', 'Sometimes', 'Often', 'Very often']]
nominal_prefixes = ['es', 'smen', 'change', 'stress', 'inter', 'ther', 'conc', 'fbad', 'cov']
data = nominal_encode(data, columns=nominal_features, prefixes=nominal_prefixes)
data = ordinal_encode(data, columns=ordinal_features, orderings=ordinal_orderings)
data.select_dtypes('object')
data = binary_encode(data, columns=['Are you above 30 years of age?'], positive_values=['Yes'])
data = binary_encode(data, columns=['Prediction_status'], positive_values=['True'])
data.select_dtypes('object')
print('Remaining missing values:', data.isna().sum().sum())
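# A minimal usage sketch of the three encoders on a hypothetical toy frame;
# the column names and values below are illustrative only, not from the survey.
_toy = pd.DataFrame({'mood': ['Good', 'Sad', 'Fine'],
                     'adult': ['Yes', 'No', 'Yes'],
                     'area': ['Work', 'Family', 'Work']})
_toy = ordinal_encode(_toy, columns=['mood'], orderings=[['Good', 'Fine', 'Sad', 'Depressed']])
_toy = binary_encode(_toy, columns=['adult'], positive_values=['Yes'])
_toy = nominal_encode(_toy, columns=['area'], prefixes=['area'])
print(_toy)  # mood -> 0/2/1, adult -> 1/0/1, area -> one-hot columns area_Family / area_Work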
code
89141968/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/mental-health-dataset/Mental Health Questionnaire 2.0.csv')
data.isna().sum()
data.isna().mean()
data['(If sad)have you been in the same mental state for the past few days?'].mode()
code
17117770/cell_11
[ "text_plain_output_1.png" ]
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
model = SVC(gamma='auto')
features = list(train.columns)
features.remove('label')
# train_df/valid_df were used below but never defined; an 80/20 split is assumed here
train_df, valid_df = train_test_split(train, test_size=0.2, random_state=0)
model.fit(train_df[features], train_df['label'])
model.score(valid_df[features], valid_df['label'])
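# A hedged follow-up sketch: predicting on an unseen test file. The
# '../input/test.csv' path and its having the same feature columns (and no
# 'label') are assumptions, not confirmed by this cell.
test = pd.read_csv('../input/test.csv')
preds = model.predict(test[features])
print(preds[:10])  # first few predicted labels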
code
17117770/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
17117770/cell_10
[ "text_html_output_1.png" ]
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
model = SVC(gamma='auto')
features = list(train.columns)
features.remove('label')
# train_df was used below but never defined; the same assumed 80/20 split is applied
train_df, valid_df = train_test_split(train, test_size=0.2, random_state=0)
model.fit(train_df[features], train_df['label'])
code
17117770/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
train.head()
code
34121580/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna()  # preview with NaN rows dropped; not assigned, so df2 itself keeps its NaNs
code
34121580/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
explode = []
for i in labels:
    explode.append(0.05)
# centre circle for a donut chart; no pie has been drawn on this figure yet
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.tight_layout()
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna()  # preview with NaN rows dropped; not assigned, so df2 itself keeps its NaNs
df3 = df2.drop(37)
plt.figure(figsize=(10, 10))
plt.hlines(y=df3['State'], xmin=0, xmax=40, color='grey', alpha=0.4)
# note: marker colours do not match the zone names in the legend labels
plt.scatter(df3['Red'], df3['State'], color='skyblue', label='Red Zone')
plt.scatter(df3['Orange'], df3['State'], color='Green', label='Orange Zone')
plt.scatter(df3['Green'], df3['State'], color='Red', label='Green Zone')
plt.legend()
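# A hedged sketch of the donut chart the centre circle implies; the pie call
# itself is absent above, so these arguments are inferred from the labels,
# decease and explode variables and are an assumption.
plt.figure(figsize=(10, 10))
plt.pie(decease, labels=labels, explode=explode)
plt.gca().add_artist(plt.Circle((0, 0), 0.7, fc='white'))  # hollow centre turns the pie into a donut
plt.tight_layout()
plt.show()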
code
34121580/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
print(df1)
code
34121580/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna()  # preview with NaN rows dropped; not assigned, so df2 itself keeps its NaNs
df3 = df2.drop(37)
df3
code
34121580/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
explode = []
for i in labels:
    explode.append(0.05)
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.tight_layout()
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna()  # preview with NaN rows dropped; not assigned, so df2 itself keeps its NaNs
df3 = df2.drop(37)
plt.hlines(y=df3['State'], xmin=0, xmax=40, color='grey', alpha=0.4)
df4 = pd.read_csv('../input/covid-19-india/hotty.csv')
df4.columns
hotty = pd.pivot_table(df4, values=['Red', 'Orange', 'Green', 'Total'], index='State', aggfunc='max')
state_names = list(hotty.index)
hotty['State'] = state_names
sns.set_color_codes('pastel')
plt.figure(figsize=(15, 10))
# overlay the zone counts on a shared axis, one legend entry per zone
sns.barplot(x='Total', y='State', data=df4, label='Total', color='#9370db')
sns.barplot(x='Red', y='State', data=df4, label='Red', color='Blue')
sns.barplot(x='Orange', y='State', data=df4, label='Orange', color='Red')
sns.barplot(x='Green', y='State', data=df4, label='Green', color='Green')
plt.legend()
code
34121580/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
code
34121580/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
explode = []
for i in labels:
    explode.append(0.05)
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.tight_layout()
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna()  # preview with NaN rows dropped; not assigned, so df2 itself keeps its NaNs
df3 = df2.drop(37)
plt.hlines(y=df3['State'], xmin=0, xmax=40, color='grey', alpha=0.4)
df4 = pd.read_csv('../input/covid-19-india/hotty.csv')
df4.columns
hotty = pd.pivot_table(df4, values=['Red', 'Orange', 'Green', 'Total'], index='State', aggfunc='max')
state_names = list(hotty.index)
hotty['State'] = state_names
plt.figure(figsize=(25, 10))
sns.set_color_codes('pastel')
code
34121580/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
df1['Active'] = df1['Confirmed'] - (df1['Deceased'] + df1['Recovered'])
df1['Deceased Rate (per 100)'] = np.round(100 * df1['Deceased'] / df1['Confirmed'], 2)
df1['Recovered Rate (per 100)'] = np.round(100 * df1['Recovered'] / df1['Confirmed'], 2)
df1.sort_values('Confirmed', ascending=False).fillna(0).style \
    .background_gradient(cmap='Blues', subset=['Confirmed']) \
    .background_gradient(cmap='Blues', subset=['Deceased']) \
    .background_gradient(cmap='Blues', subset=['Recovered']) \
    .background_gradient(cmap='Blues', subset=['Active']) \
    .background_gradient(cmap='Blues', subset=['Deceased Rate (per 100)']) \
    .background_gradient(cmap='Blues', subset=['Recovered Rate (per 100)'])
df1.columns
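# A small worked check of the rate formula above, guarded against states with
# zero confirmed cases; the toy numbers are illustrative, not real data.
_toy = pd.DataFrame({'Confirmed': [100, 0], 'Deceased': [3, 0]})
_rate = np.round(100 * _toy['Deceased'] / _toy['Confirmed'].replace(0, np.nan), 2)
print(_rate.fillna(0).tolist())  # [3.0, 0.0] -- zero-confirmed rows map to 0 rather than NaN/inf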
code
34121580/cell_11
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import pandas as pd
import statsmodels.api as sm

df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
x = df1['Active']
y = target['Deceased']
model = sm.OLS(y, x).fit()  # fits without an intercept; sm.OLS adds no constant unless sm.add_constant is used
predictions = model.predict(x)
model.summary()
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
from sklearn import linear_model
lm = linear_model.LinearRegression()
# caution: X includes 'Deceased' itself, so the fit reproduces the target trivially (data leakage)
model = lm.fit(X, y)
predictions = lm.predict(X)
print(predictions)
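# A hedged re-fit without the target leaking into the features: 'Deceased' is
# dropped from X and performance is checked on held-out rows. The 80/20 split
# and random_state are assumptions, not part of the original cell.
from sklearn.model_selection import train_test_split
X_clean = df1[['Confirmed', 'Active', 'Recovered']]
X_tr, X_te, y_tr, y_te = train_test_split(X_clean, y, test_size=0.2, random_state=0)
lm2 = linear_model.LinearRegression().fit(X_tr, y_tr)
print('held-out R^2:', lm2.score(X_te, y_te))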
code