path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 class: "code")
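Each row below pairs a notebook cell path with the file names of its rendered output screenshots and the cell's source code. As a minimal sketch of how rows with this schema could be read, assuming a Hugging Face-style dataset and the `datasets` library (the dataset identifier and split name here are placeholders for illustration, not the real ones):

from datasets import load_dataset  # assumes the Hugging Face `datasets` library is installed

# "user/notebook-cells" is a placeholder dataset identifier, used only for illustration
ds = load_dataset("user/notebook-cells", split="train")

row = ds[0]
print(row["path"])              # e.g. "130013718/cell_12"
print(row["screenshot_names"])  # list of output image file names for the cell
print(row["cell_type"])         # always "code" in this dataset (single class)
print(row["code"][:200])        # beginning of the cell's source text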
130013718/cell_12
[ "text_html_output_1.png" ]
from PIL import Image import numpy as np import numpy as np # linear algebra import numpy as np # linear algebra import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf Id = [] import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'): for filename in filenames: Id.append(os.path.join(dirname, filename)) Id[:5] sol_train = pd.DataFrame() sol_train = sol_train.assign(filename=Id) sol_train['label'] = sol_train['filename'] sol_train['label'] = sol_train['label'].str.replace('/kaggle/input/cassava-disease-classification/train/', '') sol_train['label'] = sol_train['label'].str.split('/').str[0] Id = [] import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'): for filename in filenames: Id.append(os.path.join(dirname, filename)) Id[:5] sol_test = pd.DataFrame() sol_test = sol_test.assign(filename=Id) sol_test['label'] = sol_test['filename'] sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '') sol_test['label'] = sol_test['label'].str.split('/').str[0] import tensorflow as tf import numpy as np from PIL import Image model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet') classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy'] result = [] for i in sol_test.filename: img = Image.open(i).convert('RGB') img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.ANTIALIAS) inp_numpy = np.array(img)[None] inp = tf.constant(inp_numpy, dtype='float32') class_scores = model(inp)[0].numpy() result.append(classes[class_scores.argmax()]) result[:5] result = [] for i in sol_train.filename: img = Image.open(i).convert('RGB') img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.ANTIALIAS) inp_numpy = np.array(img)[None] inp = tf.constant(inp_numpy, dtype='float32') class_scores = model(inp)[0].numpy() result.append(classes[class_scores.argmax()]) result[:5]
code
130013718/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os import os Id = [] import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'): for filename in filenames: Id.append(os.path.join(dirname, filename)) Id[:5] Id = [] import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'): for filename in filenames: Id.append(os.path.join(dirname, filename)) Id[:5]
code
2001102/cell_4
[ "text_html_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train_df = train_df.rename(columns={'train_id': 'id'}) train_df.head()
code
2001102/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train = pd.read_table('../input/train.tsv') train_df = train_df.rename(columns={'train_id': 'id'}) test_df = test_df.rename(columns={'test_id': 'id'}) train_test_combine = pd.concat([train_df.drop(['price'], axis=1), test_df], axis=0) train_test_combine.category_name = train_test_combine.category_name.astype('category') train_test_combine.item_description = train_test_combine.item_description.astype('category') train_test_combine.name = train_test_combine.name.astype('category') train_test_combine.brand_name = train_test_combine.brand_name.astype('category') train_test_combine.name = train_test_combine.name.cat.codes train_test_combine.brand_name = train_test_combine.brand_name.cat.codes train_test_combine.item_description = train_test_combine.item_description.cat.codes train_test_combine.category_name = train_test_combine.category_name.cat.codes train_test_combine = train_test_combine.drop(['brand_name'], axis=1) train_df = train_test_combine.loc[train_test_combine['is_train'] == 1] test_df = train_test_combine.loc[train_test_combine['is_train'] == 0] train_df = train_df.drop(['is_train'], axis=1) test_df = test_df.drop(['is_train'], axis=1) train_df.head()
code
2001102/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train_df = train_df.rename(columns={'train_id': 'id'}) plt.figure(figsize=(20, 15)) plt.hist(train_df['price'], bins=50, range=[0, 300], label='price') plt.xlabel('Price') plt.ylabel('Sample') plt.title('Sale Price Distribution') plt.show()
code
2001102/cell_18
[ "image_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train = pd.read_table('../input/train.tsv') train_df = train_df.rename(columns={'train_id': 'id'}) test_df = test_df.rename(columns={'test_id': 'id'}) train_test_combine = pd.concat([train_df.drop(['price'], axis=1), test_df], axis=0) train_test_combine.category_name = train_test_combine.category_name.astype('category') train_test_combine.item_description = train_test_combine.item_description.astype('category') train_test_combine.name = train_test_combine.name.astype('category') train_test_combine.brand_name = train_test_combine.brand_name.astype('category') train_test_combine.name = train_test_combine.name.cat.codes train_test_combine.brand_name = train_test_combine.brand_name.cat.codes train_test_combine.item_description = train_test_combine.item_description.cat.codes train_test_combine.category_name = train_test_combine.category_name.cat.codes train_test_combine.head()
code
2001102/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train_df = train_df.rename(columns={'train_id': 'id'}) train_df['price'].describe()
code
2001102/cell_31
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier random_forest_model = RandomForestRegressor(n_jobs=-1, min_samples_leaf=3, n_estimators=200) random_forest_model.fit(features_rdf, target_rdf) random_forest_model.score(features_rdf, target_rdf)
code
2001102/cell_14
[ "text_html_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train = pd.read_table('../input/train.tsv') train_df = train_df.rename(columns={'train_id': 'id'}) test_df = test_df.rename(columns={'test_id': 'id'}) train_test_combine = pd.concat([train_df.drop(['price'], axis=1), test_df], axis=0) train_test_combine.head()
code
2001102/cell_10
[ "text_html_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train_df = train_df.rename(columns={'train_id': 'id'}) print(train_df.isnull().sum(), train_df.isnull().sum() / train_df.shape[0] * 100)
code
2001102/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) train = pd.read_table('../input/train.tsv') train_df = train_df.rename(columns={'train_id': 'id'}) test_df = test_df.rename(columns={'test_id': 'id'}) train_test_combine = pd.concat([train_df.drop(['price'], axis=1), test_df], axis=0) train_test_combine.category_name = train_test_combine.category_name.astype('category') train_test_combine.item_description = train_test_combine.item_description.astype('category') train_test_combine.name = train_test_combine.name.astype('category') train_test_combine.brand_name = train_test_combine.brand_name.astype('category') train_test_combine.name = train_test_combine.name.cat.codes train_test_combine.brand_name = train_test_combine.brand_name.cat.codes train_test_combine.item_description = train_test_combine.item_description.cat.codes train_test_combine.category_name = train_test_combine.category_name.cat.codes train_test_combine = train_test_combine.drop(['brand_name'], axis=1) train_df = train_test_combine.loc[train_test_combine['is_train'] == 1] test_df = train_test_combine.loc[train_test_combine['is_train'] == 0] train_df = train_df.drop(['is_train'], axis=1) test_df = test_df.drop(['is_train'], axis=1) test_df.head()
code
2001102/cell_5
[ "text_html_output_1.png" ]
import pandas as pd types_dict_train = {'train_id': 'int64', 'item_condition_id': 'int8', 'price': 'float64', 'shipping': 'int8'} train_df = pd.read_csv('../input/train.tsv', delimiter='\t', low_memory=True, dtype=types_dict_train) types_dict_test = {'test_id': 'int64', 'item_condition_id': 'int8', 'shipping': 'int8'} test_df = pd.read_csv('../input/test.tsv', delimiter='\t', low_memory=True, dtype=types_dict_test) test_df = test_df.rename(columns={'test_id': 'id'}) test_df.head()
code
1006233/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv') train_df = train_df[pd.isnull(train_df['Age']) == False] features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1) labels = train_df['Survived'] n_samples = len(train_df) n_features = len(features.columns) n_survived = labels.value_counts()[1] n_died = labels.value_counts()[0] processed_features = pd.DataFrame(index=features.index) for col, col_data in features.iteritems(): if col == 'Sex': col_data = col_data.replace(['male', 'female'], [1, 0]) if col == 'Embarked' or col == 'Cabin': col_data = pd.get_dummies(col_data, prefix=col) processed_features = processed_features.join(col_data) processed_features.head(n=20)
code
1006233/cell_6
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import MultinomialNB from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv') train_df = train_df[pd.isnull(train_df['Age']) == False] features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1) labels = train_df['Survived'] n_samples = len(train_df) n_features = len(features.columns) n_survived = labels.value_counts()[1] n_died = labels.value_counts()[0] processed_features = pd.DataFrame(index=features.index) for col, col_data in features.iteritems(): if col == 'Sex': col_data = col_data.replace(['male', 'female'], [1, 0]) if col == 'Embarked' or col == 'Cabin': col_data = pd.get_dummies(col_data, prefix=col) processed_features = processed_features.join(col_data) from sklearn.linear_model import LogisticRegressionCV from sklearn.tree import DecisionTreeClassifier from sklearn.svm import LinearSVC from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import cross_val_score classifiers = [('Linear SVC', LinearSVC()), ('Decision Tree', DecisionTreeClassifier()), ('Multinomial NB', MultinomialNB())] random_score = float(max(n_survived, n_died)) / float(n_samples) for title, clf in classifiers: score = np.mean(cross_val_score(clf, processed_features, labels, cv=5)) scores = [] for max_depth in range(1, 10): clf = DecisionTreeClassifier(max_depth=max_depth) score = np.mean(cross_val_score(clf, processed_features, labels, cv=5)) print('Max depth of {}: {:.4f}'.format(max_depth, score)) scores.append((max_depth, score))
code
1006233/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv') train_df = train_df[pd.isnull(train_df['Age']) == False] features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1) labels = train_df['Survived'] n_samples = len(train_df) n_features = len(features.columns) n_survived = labels.value_counts()[1] n_died = labels.value_counts()[0] print('Number of training samples: {}'.format(n_samples)) print('Number of features: {}'.format(n_features)) print('Number of survivors: {}'.format(n_survived)) print('Number of deaths: {}'.format(n_died))
code
1006233/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd import matplotlib.pyplot as plt from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
1006233/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv') train_df = train_df[pd.isnull(train_df['Age']) == False] features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1) labels = train_df['Survived'] n_samples = len(train_df) n_features = len(features.columns) n_survived = labels.value_counts()[1] n_died = labels.value_counts()[0] features.head(n=20)
code
1006233/cell_5
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import MultinomialNB from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv') train_df = train_df[pd.isnull(train_df['Age']) == False] features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1) labels = train_df['Survived'] n_samples = len(train_df) n_features = len(features.columns) n_survived = labels.value_counts()[1] n_died = labels.value_counts()[0] processed_features = pd.DataFrame(index=features.index) for col, col_data in features.iteritems(): if col == 'Sex': col_data = col_data.replace(['male', 'female'], [1, 0]) if col == 'Embarked' or col == 'Cabin': col_data = pd.get_dummies(col_data, prefix=col) processed_features = processed_features.join(col_data) from sklearn.linear_model import LogisticRegressionCV from sklearn.tree import DecisionTreeClassifier from sklearn.svm import LinearSVC from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import cross_val_score classifiers = [('Linear SVC', LinearSVC()), ('Decision Tree', DecisionTreeClassifier()), ('Multinomial NB', MultinomialNB())] random_score = float(max(n_survived, n_died)) / float(n_samples) print('Random score: {:.4f}'.format(random_score)) for title, clf in classifiers: score = np.mean(cross_val_score(clf, processed_features, labels, cv=5)) print('{} score: {:.4f}'.format(title, score))
code
1008540/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') import seaborn as sns from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense from keras.optimizers import Adam from sklearn.model_selection import train_test_split from subprocess import check_output print('Files in Input Directory:') print(check_output(['ls', '../input']).decode('utf8'))
code
18104686/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import seaborn as sns import os train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') Survials_By_Age = train_data.groupby('Age')['Survived'].sum().reset_index() Survials_By_Age_Segment = [] age_difference = 5 max_age = 70 for i in range(max_age // age_difference): s = 0 for j in range(age_difference): s = s + Survials_By_Age.loc[[i * age_difference + j, 'Age'], 'Survived'][0] Survials_By_Age_Segment.append(s) Survials_By_Age_Segment = pd.Series(Survials_By_Age_Segment, index=list(range(0, max_age, age_difference))) sns.barplot(y=Survials_By_Age_Segment, x=Survials_By_Age_Segment.index) print(Survials_By_Age_Segment)
code
18104686/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import seaborn as sns import os print(os.listdir('../input')) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') train_data.head()
code
18104686/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import seaborn as sns import os train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') Survials_By_Age = train_data.groupby('Age')['Survived'].sum().reset_index() Survials_By_Age_Segment = [] age_difference = 5 max_age = 70 for i in range(max_age // age_difference): s = 0 for j in range(age_difference): s = s + Survials_By_Age.loc[[i * age_difference + j, 'Age'], 'Survived'][0] Survials_By_Age_Segment.append(s) Survials_By_Age_Segment = pd.Series(Survials_By_Age_Segment, index=list(range(0, max_age, age_difference))) boolean_Survivals = train_data['Survived'] == 1 Survivals = train_data[boolean_Survivals] sns.barplot(y='title', x='average_rating', data=ayu)
code
130008236/cell_21
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True) df.rate = df.rate.astype('float') type(df.rate[0]) X = df.groupby(['location']).count() X.loc[X['name'] == X['name'].max()].index.tolist() X = df.groupby(['location']).mean() A = X.sort_values(by=['rate'], ascending=False) B = X.sort_values(by=['rate']) a = A.iloc[0:5] b = B.iloc[0:5] b['rate'].plot(kind='bar')
code
130008236/cell_13
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True)
code
130008236/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique()
code
130008236/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].unique()
code
130008236/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df.head()
code
130008236/cell_11
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique()
code
130008236/cell_19
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True) df.rate = df.rate.astype('float') type(df.rate[0]) X = df.groupby(['location']).count() X.loc[X['name'] == X['name'].max()].index.tolist() X = df.groupby(['location']).mean() A = X.sort_values(by=['rate'], ascending=False) B = X.sort_values(by=['rate']) a = A.iloc[0:5] b = B.iloc[0:5] a['rate'].plot(kind='bar')
code
130008236/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
130008236/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique()
code
130008236/cell_18
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True) df.rate = df.rate.astype('float') type(df.rate[0]) X = df.groupby(['location']).count() X.loc[X['name'] == X['name'].max()].index.tolist()
code
130008236/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df['rate'] = [re.sub('\\/\\d', '', i) for i in df['rate']] df
code
130008236/cell_16
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True) df.rate = df.rate.astype('float') type(df.rate[0]) sns.barplot(x='book_table', y='rate', data=df)
code
130008236/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df.head()
code
130008236/cell_14
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test df['cuisines'] = df['cuisines'].str.split(',') df = df.explode('cuisines') df df['rest_type'] = df['rest_type'].str.split(',') df = df.explode('rest_type') df.reset_index(drop=True) sns.countplot(x='online_order', data=df)
code
130008236/cell_10
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df['rate'] = [re.sub('\\/\\d', '', i) for i in df['rate']] df df.cost.unique() df['cost'] = [re.sub(',', '', i) for i in df['cost']] df
code
130008236/cell_12
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum() df['rate'].replace('NEW', np.nan, inplace=True) df['rate'].replace('-', np.nan, inplace=True) df = df.dropna() df['rate'].unique() df.cost.unique() df.cost.unique() test = df.iloc[[2]].copy() test['cuisines'] = test['cuisines'].astype(str) test
code
130008236/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd import numpy as np import re import seaborn as sns df = pd.read_csv('/kaggle/input/zomato-eda/zomato.csv').copy() df = df.rename(columns={'listed_in(city)': 'city', 'listed_in(type)': 'type', 'approx_cost(for two people)': 'cost'}) df = df.drop(columns=['url', 'address', 'phone', 'dish_liked', 'reviews_list', 'menu_item']) df = df.dropna() df.isnull().sum()
code
88079729/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
90146618/cell_9
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.models import load_model from keras.models import load_model import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import cv2 from matplotlib import pyplot as plt from keras.models import load_model from PIL import Image from sklearn.model_selection import train_test_split import os model = load_model('../input/facenet/keras-facenet/model/facenet_keras.h5') def extract_face(filename, required_size=(160, 160)): image = Image.open(filename) image = image.convert('RGB') pixels = np.asarray(image) image = Image.fromarray(pixels) image = image.resize(required_size) face_array = np.asarray(image) return face_array def load_face(dir): faces = list() for filename in os.listdir(dir): path = dir + filename face = extract_face(path) faces.append(face) return faces def load_dataset(dir): X, y = (list(), list()) for subdir in os.listdir(dir): path = dir + subdir + '/' faces = load_face(path) labels = [subdir for i in range(len(faces))] X.extend(faces) y.extend(labels) return (np.asarray(X), np.asarray(y)) def get_embedding(face_pixels): face_pixels = face_pixels.astype('float32') mean, std = (face_pixels.mean(), face_pixels.std()) face_pixels = (face_pixels - mean) / std samples = np.expand_dims(face_pixels, axis=0) yhat = model.predict(samples) return yhat[0] data = np.load('../input/new-masked-face/extracted_masked_unmasked.npz') trainX, trainy, testX, testy = (data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']) print(trainX.shape)
code
90146618/cell_2
[ "text_plain_output_1.png" ]
import os import os import numpy as np import pandas as pd import cv2 from matplotlib import pyplot as plt from keras.models import load_model from PIL import Image from sklearn.model_selection import train_test_split import os print(os.listdir('/'))
code
90146618/cell_11
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.models import load_model from keras.models import load_model import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import cv2 from matplotlib import pyplot as plt from keras.models import load_model from PIL import Image from sklearn.model_selection import train_test_split import os model = load_model('../input/facenet/keras-facenet/model/facenet_keras.h5') def extract_face(filename, required_size=(160, 160)): image = Image.open(filename) image = image.convert('RGB') pixels = np.asarray(image) image = Image.fromarray(pixels) image = image.resize(required_size) face_array = np.asarray(image) return face_array def load_face(dir): faces = list() for filename in os.listdir(dir): path = dir + filename face = extract_face(path) faces.append(face) return faces def load_dataset(dir): X, y = (list(), list()) for subdir in os.listdir(dir): path = dir + subdir + '/' faces = load_face(path) labels = [subdir for i in range(len(faces))] X.extend(faces) y.extend(labels) return (np.asarray(X), np.asarray(y)) def get_embedding(face_pixels): face_pixels = face_pixels.astype('float32') mean, std = (face_pixels.mean(), face_pixels.std()) face_pixels = (face_pixels - mean) / std samples = np.expand_dims(face_pixels, axis=0) yhat = model.predict(samples) return yhat[0] data = np.load('../input/new-masked-face/extracted_masked_unmasked.npz') trainX, trainy, testX, testy = (data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']) print(trainy.shape)
code
90146618/cell_8
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.models import load_model from keras.models import load_model import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import cv2 from matplotlib import pyplot as plt from keras.models import load_model from PIL import Image from sklearn.model_selection import train_test_split import os model = load_model('../input/facenet/keras-facenet/model/facenet_keras.h5') def extract_face(filename, required_size=(160, 160)): image = Image.open(filename) image = image.convert('RGB') pixels = np.asarray(image) image = Image.fromarray(pixels) image = image.resize(required_size) face_array = np.asarray(image) return face_array def load_face(dir): faces = list() for filename in os.listdir(dir): path = dir + filename face = extract_face(path) faces.append(face) return faces def load_dataset(dir): X, y = (list(), list()) for subdir in os.listdir(dir): path = dir + subdir + '/' faces = load_face(path) labels = [subdir for i in range(len(faces))] X.extend(faces) y.extend(labels) return (np.asarray(X), np.asarray(y)) def get_embedding(face_pixels): face_pixels = face_pixels.astype('float32') mean, std = (face_pixels.mean(), face_pixels.std()) face_pixels = (face_pixels - mean) / std samples = np.expand_dims(face_pixels, axis=0) yhat = model.predict(samples) return yhat[0] data = np.load('../input/new-masked-face/extracted_masked_unmasked.npz') trainX, trainy, testX, testy = (data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']) print('Loaded: ', trainX.shape, trainy.shape, testX.shape, testy.shape)
code
90146618/cell_3
[ "text_plain_output_1.png" ]
from keras.models import load_model from keras.models import load_model model = load_model('../input/facenet/keras-facenet/model/facenet_keras.h5') print('Loaded Model')
code
90146618/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.models import load_model from keras.models import load_model import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import cv2 from matplotlib import pyplot as plt from keras.models import load_model from PIL import Image from sklearn.model_selection import train_test_split import os model = load_model('../input/facenet/keras-facenet/model/facenet_keras.h5') def extract_face(filename, required_size=(160, 160)): image = Image.open(filename) image = image.convert('RGB') pixels = np.asarray(image) image = Image.fromarray(pixels) image = image.resize(required_size) face_array = np.asarray(image) return face_array def load_face(dir): faces = list() for filename in os.listdir(dir): path = dir + filename face = extract_face(path) faces.append(face) return faces def load_dataset(dir): X, y = (list(), list()) for subdir in os.listdir(dir): path = dir + subdir + '/' faces = load_face(path) labels = [subdir for i in range(len(faces))] X.extend(faces) y.extend(labels) return (np.asarray(X), np.asarray(y)) def get_embedding(face_pixels): face_pixels = face_pixels.astype('float32') mean, std = (face_pixels.mean(), face_pixels.std()) face_pixels = (face_pixels - mean) / std samples = np.expand_dims(face_pixels, axis=0) yhat = model.predict(samples) return yhat[0] data = np.load('../input/new-masked-face/extracted_masked_unmasked.npz') trainX, trainy, testX, testy = (data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']) type(trainX)
code
32068320/cell_13
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font) train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False) predicted_std = model.predict(X_test) result_std = pd.DataFrame(predicted_std) result_std.columns = ['predict'] result_std['actual'] = Y_test loss = hist.history['loss'] epochs = len(loss) fig = plt.figure() plt.plot(range(epochs), loss, marker='.', label='loss(training data)') 
plt.show() predicted = scaler.inverse_transform(predicted_std) Y_test2 = scaler.inverse_transform(Y_test) result = pd.DataFrame(predicted) result.columns = ['predict'] result['actual'] = Y_test2 result.plot(figsize=(25, 6)) plt.show()
code
32068320/cell_9
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font) train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False) predicted_std = model.predict(X_test) result_std = pd.DataFrame(predicted_std) result_std.columns = ['predict'] result_std['actual'] = Y_test result_std.plot(figsize=(25, 6)) plt.show()
code
32068320/cell_2
[ "image_output_1.png" ]
import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font)
code
32068320/cell_19
[ "image_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.metrics import mean_squared_log_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import datetime import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font) train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False) predicted_std = model.predict(X_test) result_std = pd.DataFrame(predicted_std) result_std.columns = ['predict'] result_std['actual'] = Y_test loss = hist.history['loss'] epochs = len(loss) fig = plt.figure() 
plt.plot(range(epochs), loss, marker='.', label='loss(training data)') plt.show() predicted = scaler.inverse_transform(predicted_std) Y_test2 = scaler.inverse_transform(Y_test) np.sqrt(mean_squared_log_error(predicted, Y_test2)) result = pd.DataFrame(predicted) result.columns = ['predict'] result['actual'] = Y_test2 test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv') submission = pd.read_csv('../input/covid19-global-forecasting-week-4/submission.csv') temp = (datetime.datetime.strptime('2020-04-01', '%Y-%m-%d') - datetime.timedelta(days=time_series_len)).strftime('%Y-%m-%d') test_df = train_df[train_df['Date'] > temp] check_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv').query("Date>'2020-04-01'and Date<='2020-04-14'") check_df['ConfirmedCases_std'] = scaler.transform(check_df['ConfirmedCases'].values.reshape(len(check_df['ConfirmedCases'].values), 1)) confirmedCases_pred = [] for i in range(0, 313 * time_series_len, time_series_len): temp_array = np.array(test_df['ConfirmedCases_std'][i:i + time_series_len]) for j in range(43): if j < 13: temp_array = np.append(temp_array, np.array(check_df['ConfirmedCases_std'])[int(i * 13 / time_series_len) + j]) elif np.array(test_df['ConfirmedCases'][i:i + time_series_len]).sum() == 0: temp_array = np.append(temp_array, temp_array[-1]) else: temp_array = np.append(temp_array, model.predict(temp_array[-time_series_len:].reshape(1, time_series_len, 1))) confirmedCases_pred.append(temp_array[-43:]) submission['ConfirmedCases'] = np.abs(scaler.inverse_transform(np.array(confirmedCases_pred).reshape(313 * 43))) submission['ConfirmedCases_std'] = np.array(confirmedCases_pred).reshape(313 * 43) submission
code
32068320/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
32068320/cell_8
[ "text_html_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False)
code
32068320/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_df = train_df.fillna('No State')
train_df
code
32068320/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font) train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False) predicted_std = model.predict(X_test) result_std = pd.DataFrame(predicted_std) result_std.columns = ['predict'] result_std['actual'] = Y_test loss = hist.history['loss'] epochs = len(loss) fig = plt.figure() plt.plot(range(epochs), loss, marker='.', label='loss(training data)') 
plt.show()
code
32068320/cell_12
[ "text_html_output_1.png" ]
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.initializers import random_uniform from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.models import Sequential from keras.optimizers import Adagrad from sklearn.metrics import mean_squared_log_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import GRU from keras.initializers import random_uniform from keras.optimizers import Adagrad from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras import backend as K import tensorflow as tf import datetime import matplotlib.pyplot as plt plt.style.use('ggplot') font = {'family': 'meiryo'} plt.rc('font', **font) train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv') train_df = train_df.fillna('No State') train_df test_rate = 0.1 time_series_len = 18 train_date_count = len(set(train_df['Date'])) X, Y = ([], []) scaler = StandardScaler() train_df['ConfirmedCases_std'] = scaler.fit_transform(train_df['ConfirmedCases'].values.reshape(len(train_df['ConfirmedCases'].values), 1)) for state, country in train_df.groupby(['Province_State', 'Country_Region']).sum().index: df = train_df[(train_df['Country_Region'] == country) & (train_df['Province_State'] == state)] if df['ConfirmedCases'].sum() != 0: for i in range(len(df) - time_series_len): X.append(df[['ConfirmedCases_std']].iloc[i:i + time_series_len].values) Y.append(df[['ConfirmedCases_std']].iloc[i + time_series_len].values) X = np.array(X) Y = np.array(Y) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0) def huber_loss(y_true, y_pred, clip_delta=1.0): error = y_true - y_pred cond = tf.keras.backend.abs(error) < clip_delta squared_loss = 0.5 * tf.keras.backend.square(error) linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta) return tf.where(cond, squared_loss, linear_loss) def huber_loss_mean(y_true, y_pred, clip_delta=1.0): return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta)) epochs_num = 20 n_in = 1 model = Sequential() model.add(GRU(100, batch_input_shape=(None, time_series_len, n_in), kernel_initializer=random_uniform(seed=0), return_sequences=False)) model.add(Dense(50)) model.add(Dropout(0.15, seed=0)) model.add(Dense(n_in, kernel_initializer=random_uniform(seed=0))) model.add(Activation('linear')) opt = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001) model.compile(loss=huber_loss_mean, optimizer=opt) callbacks = [ReduceLROnPlateau(monitor='loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='loss', patience=10)] hist = model.fit(X_train, Y_train, batch_size=20, epochs=epochs_num, callbacks=callbacks, shuffle=False) predicted_std = model.predict(X_test) result_std = pd.DataFrame(predicted_std) result_std.columns = ['predict'] result_std['actual'] = Y_test predicted = scaler.inverse_transform(predicted_std) Y_test2 = 
scaler.inverse_transform(Y_test)
np.sqrt(mean_squared_log_error(predicted, Y_test2))
code
122258225/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.isna().sum()
code
122258225/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.head()
code
122258225/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.info()
code
122258225/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_train.isna().sum()
df_titanic_train['Sex'].value_counts()
code
122258225/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
df_gender_submission.head()
code
122258225/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_train.isna().sum()
code
122258225/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_train.isna().sum()
df_titanic_train['Embarked'].value_counts()
code
122258225/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.describe()
code
122258225/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122258225/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.head()
code
122258225/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_train['Embarked'].mode()
code
122258225/cell_35
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_train.isna().sum()
df_titanic_test.isna().sum()
model = LogisticRegression()
x = df_titanic_train.loc[:, df_titanic_train.columns != 'Survived']
y = df_titanic_train['Survived']
model.fit(x, y)
y_pred = model.predict(df_titanic_test)
df_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
df_true_answers = df_gender_submission['Survived']
print(accuracy_score(df_true_answers, y_pred) * 100)
code
122258225/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.drop(labels=['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
df_titanic_test.isna().sum()
df_titanic_test['Sex'].value_counts()
code
122258225/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_train.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_test.drop(labels=['Cabin'], axis=1, inplace=True)
df_titanic_train.describe()
code
122258225/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_titanic_test.info()
code
2035583/cell_9
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import numpy as np  # linear algebra
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y)
x_train = np.array(x_train).reshape(len(x_train), 10)
x_test = np.array(x_test).reshape(len(x_test), 10)
y_train = y_train.values.ravel()
y_test = y_test.values.ravel()

def fit_and_score(x_train, x_validation, y_train, y_validation):
    names = ['OneVsrest', 'OneVsOne', 'MultinomialNB', 'AdaBoost', 'LinearRegression', 'DecisionTreeRegressor', 'AdaBoostRegressor', 'GradientBoostingRegressor']
    models = [OneVsRestClassifier(LinearSVC(random_state=0)), OneVsOneClassifier(LinearSVC(random_state=0)), MultinomialNB(), AdaBoostClassifier(), LinearRegression(), tree.DecisionTreeRegressor(), AdaBoostRegressor(), GradientBoostingRegressor()]
    scores_train = []
    scores_validation = []
    for model in models:
        model.fit(x_train, y_train)
        scores_train.append(model.score(x_train, y_train))
        scores_validation.append(model.score(x_validation, y_validation))
    return (names, scores_train, scores_validation)

nome, resultado_treino, resultado_validacao = fit_and_score(x_train, x_test, y_train, y_test)
print(' Results ')
for n, r_train, r_validation in zip(nome, resultado_treino, resultado_validacao):
    print('_' * 30)
    print('Model: {}'.format(n))
    print('Score train: {:0.3}'.format(r_train))
    print('Score validation: {:0.3}'.format(r_validation))
    print('\n')
code
2035583/cell_4
[ "text_html_output_1.png" ]
X.head()
code
2035583/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2035583/cell_5
[ "text_plain_output_1.png" ]
print(X.isnull().any())
code
17135521/cell_34
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns df_test = pd.read_csv('../input/test.csv') df_test[['Id', 'SalePrice']].head()
code
17135521/cell_23
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns X = df_tidy[['OverallQual']] y = df_tidy['SalePrice'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) print(X_train.count()) print(X_test.count())
code
17135521/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns df_test = pd.read_csv('../input/test.csv') df_test.head()
code
17135521/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns df_test = pd.read_csv('../input/test.csv') df_test.head()
code
17135521/cell_26
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/train.csv') corrmat = df.corr() # ヒートマップに表示させるカラムの数 k = 10 # SalesPriceとの相関が大きい上位10個のカラム名を取得 cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index # SalesPriceとの相関が大きい上位10個のカラムを対象に相関を算出 # .T(Trancepose[転置行列])を行う理由は、corrcoefで相関を算出する際に、各カラムの値を行毎にまとめなければならない為 cm = np.corrcoef(df[cols].values.T) # ヒートマップのフォントサイズを指定 sns.set(font_scale=1.25) # 算出した相関データをヒートマップで表示 hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values) plt.show() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns X = df_tidy[['OverallQual']] y = df_tidy['SalePrice'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) slr = LinearRegression() slr.fit(X_train, y_train) plt.scatter(X_train, y_train) plt.plot(X_train, slr.predict(X_train), color='red') plt.show()
code
17135521/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/train.csv')
corrmat = df.corr()
k = 10
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
code
17135521/cell_19
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns
code
17135521/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
len(df)
code
17135521/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.describe()
code
17135521/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
corrmat = df.corr()
df.isnull().sum().sort_values().tail(15)
df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna()
df_tidy.isnull().sum().sort_values().tail(15)
code
17135521/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
import seaborn as sns
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
code
17135521/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
corrmat = df.corr()
df.isnull().sum().sort_values().tail(15)
df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna()
df_tidy.isnull().sum().sort_values().tail(15)
df_tidy.describe()
code
17135521/cell_31
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/train.csv') corrmat = df.corr() # ヒートマップに表示させるカラムの数 k = 10 # SalesPriceとの相関が大きい上位10個のカラム名を取得 cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index # SalesPriceとの相関が大きい上位10個のカラムを対象に相関を算出 # .T(Trancepose[転置行列])を行う理由は、corrcoefで相関を算出する際に、各カラムの値を行毎にまとめなければならない為 cm = np.corrcoef(df[cols].values.T) # ヒートマップのフォントサイズを指定 sns.set(font_scale=1.25) # 算出した相関データをヒートマップで表示 hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values) plt.show() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns X = df_tidy[['OverallQual']] y = df_tidy['SalePrice'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) slr = LinearRegression() slr.fit(X_train, y_train) from sklearn.metrics import r2_score y_train_pred = slr.predict(X_train) y_test_pred = slr.predict(X_test) df_test = pd.read_csv('../input/test.csv') X_test = df_test[['OverallQual']].values y_test_pred = slr.predict(X_test) y_test_pred
code
17135521/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/train.csv') corrmat = df.corr() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns X = df_tidy[['OverallQual']] y = df_tidy['SalePrice'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) slr = LinearRegression() slr.fit(X_train, y_train)
code
17135521/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
corrmat = df.corr()
df.isnull().sum().sort_values().tail(15)
code
17135521/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
corrmat = df.corr()
corrmat.head()
code
17135521/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/train.csv') corrmat = df.corr() # ヒートマップに表示させるカラムの数 k = 10 # SalesPriceとの相関が大きい上位10個のカラム名を取得 cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index # SalesPriceとの相関が大きい上位10個のカラムを対象に相関を算出 # .T(Trancepose[転置行列])を行う理由は、corrcoefで相関を算出する際に、各カラムの値を行毎にまとめなければならない為 cm = np.corrcoef(df[cols].values.T) # ヒートマップのフォントサイズを指定 sns.set(font_scale=1.25) # 算出した相関データをヒートマップで表示 hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values) plt.show() df.isnull().sum().sort_values().tail(15) df_tidy = df.fillna({'PoolQC': 'Nothing', 'MiscFeature': 'Nothing', 'Alley': 'Nothing', 'Fence': 'Nothing', 'FireplaceQu': 'Nothing', 'LotFrontage': 0}).dropna() df_tidy.isnull().sum().sort_values().tail(15) df_tidy = pd.get_dummies(df_tidy, columns=['MSZoning', 'LotFrontage', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition'], drop_first=True) df_tidy.columns X = df_tidy[['OverallQual']] y = df_tidy['SalePrice'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) slr = LinearRegression() slr.fit(X_train, y_train) from sklearn.metrics import r2_score y_train_pred = slr.predict(X_train) y_test_pred = slr.predict(X_test) print('Accuracy on Training Set: {:.3f}'.format(r2_score(y_train, y_train_pred))) print('Accuracy on Validation Set: {:.3f}'.format(r2_score(y_test, y_test_pred)))
code
17135521/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.head()
code
104131002/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns import seaborn as sns # Functions def check_df(dataframe, head=5): print("##################### Shape #####################") print(dataframe.shape) print("##################### Types #####################") print(dataframe.dtypes) print("##################### Head #####################") print(dataframe.head(head)) print("##################### Tail #####################") print(dataframe.tail(head)) print("##################### NA #####################") print(dataframe.isnull().sum()) print("##################### Quantiles #####################") print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T) def cat_summary(dataframe, col_name, plot=False): print(pd.DataFrame({col_name: dataframe[col_name].value_counts(), "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)})) print("##########################################") if plot: sns.countplot(x=dataframe[col_name], data=dataframe) plt.show(block=True) def num_summary(dataframe, numerical_col, plot=False): quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99] print(dataframe[numerical_col].describe(quantiles).T) if plot: dataframe[numerical_col].hist(bins=20) plt.xlabel(numerical_col) plt.title(numerical_col) plt.show(block=True) def target_summary_with_num(dataframe, target, numerical_col): print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n") def target_summary_with_cat(dataframe, target, categorical_col): print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n") def correlation_matrix(df, cols): fig = plt.gcf() fig.set_size_inches(10, 8) plt.xticks(fontsize=10) plt.yticks(fontsize=10) fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu') plt.show(block=True) def grab_col_names(dataframe, cat_th=10, car_th=20): """ Veri setindeki kategorik, numerik ve kategorik fakat kardinal değişkenlerin isimlerini verir. Not: Kategorik değişkenlerin içerisine numerik görünümlü kategorik değişkenler de dahildir. Parameters ------ dataframe: dataframe Değişken isimleri alınmak istenilen dataframe cat_th: int, optional numerik fakat kategorik olan değişkenler için sınıf eşik değeri car_th: int, optinal kategorik fakat kardinal değişkenler için sınıf eşik değeri Returns ------ cat_cols: list Kategorik değişken listesi num_cols: list Numerik değişken listesi cat_but_car: list Kategorik görünümlü kardinal değişken listesi Examples ------ import seaborn as sns df = sns.load_dataset("iris") print(grab_col_names(df)) Notes ------ cat_cols + num_cols + cat_but_car = toplam değişken sayısı num_but_cat cat_cols'un içerisinde. 
Return olan 3 liste toplamı toplam değişken sayısına eşittir: cat_cols + num_cols + cat_but_car = değişken sayısı """ # cat_cols, cat_but_car cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"] num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"] cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"] cat_cols = cat_cols + num_but_cat cat_cols = [col for col in cat_cols if col not in cat_but_car] # num_cols num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"] num_cols = [col for col in num_cols if col not in num_but_cat] # print(f"Observations: {dataframe.shape[0]}") # print(f"Variables: {dataframe.shape[1]}") # print(f'cat_cols: {len(cat_cols)}') # print(f'num_cols: {len(num_cols)}') # print(f'cat_but_car: {len(cat_but_car)}') # print(f'num_but_cat: {len(num_but_cat)}') return cat_cols, num_cols, cat_but_car def missing_values_table(dataframe, na_name=False): na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0] n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False) ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False) missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio']) print(missing_df, end="\n") if na_name: return na_columns def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75): quartile1 = dataframe[col_name].quantile(q1) quartile3 = dataframe[col_name].quantile(q3) interquantile_range = quartile3 - quartile1 up_limit = quartile3 + 1.5 * interquantile_range low_limit = quartile1 - 1.5 * interquantile_range return low_limit, up_limit def replace_with_thresholds(dataframe, variable): low_limit, up_limit = outlier_thresholds(dataframe, variable) dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit def check_outlier(dataframe, col_name, q1=0.25, q3=0.75): low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3) if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): return True else: return False def one_hot_encoder(dataframe, categorical_cols, drop_first=False): dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first) return dataframe num_cols
code
104131002/cell_25
[ "text_plain_output_1.png" ]
from scipy import stats import datetime as dt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns import seaborn as sns import seaborn as sns # Functions def check_df(dataframe, head=5): print("##################### Shape #####################") print(dataframe.shape) print("##################### Types #####################") print(dataframe.dtypes) print("##################### Head #####################") print(dataframe.head(head)) print("##################### Tail #####################") print(dataframe.tail(head)) print("##################### NA #####################") print(dataframe.isnull().sum()) print("##################### Quantiles #####################") print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T) def cat_summary(dataframe, col_name, plot=False): print(pd.DataFrame({col_name: dataframe[col_name].value_counts(), "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)})) print("##########################################") if plot: sns.countplot(x=dataframe[col_name], data=dataframe) plt.show(block=True) def num_summary(dataframe, numerical_col, plot=False): quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99] print(dataframe[numerical_col].describe(quantiles).T) if plot: dataframe[numerical_col].hist(bins=20) plt.xlabel(numerical_col) plt.title(numerical_col) plt.show(block=True) def target_summary_with_num(dataframe, target, numerical_col): print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n") def target_summary_with_cat(dataframe, target, categorical_col): print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n") def correlation_matrix(df, cols): fig = plt.gcf() fig.set_size_inches(10, 8) plt.xticks(fontsize=10) plt.yticks(fontsize=10) fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu') plt.show(block=True) def grab_col_names(dataframe, cat_th=10, car_th=20): """ Veri setindeki kategorik, numerik ve kategorik fakat kardinal değişkenlerin isimlerini verir. Not: Kategorik değişkenlerin içerisine numerik görünümlü kategorik değişkenler de dahildir. Parameters ------ dataframe: dataframe Değişken isimleri alınmak istenilen dataframe cat_th: int, optional numerik fakat kategorik olan değişkenler için sınıf eşik değeri car_th: int, optinal kategorik fakat kardinal değişkenler için sınıf eşik değeri Returns ------ cat_cols: list Kategorik değişken listesi num_cols: list Numerik değişken listesi cat_but_car: list Kategorik görünümlü kardinal değişken listesi Examples ------ import seaborn as sns df = sns.load_dataset("iris") print(grab_col_names(df)) Notes ------ cat_cols + num_cols + cat_but_car = toplam değişken sayısı num_but_cat cat_cols'un içerisinde. 
Return olan 3 liste toplamı toplam değişken sayısına eşittir: cat_cols + num_cols + cat_but_car = değişken sayısı """ # cat_cols, cat_but_car cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"] num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"] cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"] cat_cols = cat_cols + num_but_cat cat_cols = [col for col in cat_cols if col not in cat_but_car] # num_cols num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"] num_cols = [col for col in num_cols if col not in num_but_cat] # print(f"Observations: {dataframe.shape[0]}") # print(f"Variables: {dataframe.shape[1]}") # print(f'cat_cols: {len(cat_cols)}') # print(f'num_cols: {len(num_cols)}') # print(f'cat_but_car: {len(cat_but_car)}') # print(f'num_but_cat: {len(num_but_cat)}') return cat_cols, num_cols, cat_but_car def missing_values_table(dataframe, na_name=False): na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0] n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False) ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False) missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio']) print(missing_df, end="\n") if na_name: return na_columns def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75): quartile1 = dataframe[col_name].quantile(q1) quartile3 = dataframe[col_name].quantile(q3) interquantile_range = quartile3 - quartile1 up_limit = quartile3 + 1.5 * interquantile_range low_limit = quartile1 - 1.5 * interquantile_range return low_limit, up_limit def replace_with_thresholds(dataframe, variable): low_limit, up_limit = outlier_thresholds(dataframe, variable) dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit def check_outlier(dataframe, col_name, q1=0.25, q3=0.75): low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3) if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): return True else: return False def one_hot_encoder(dataframe, categorical_cols, drop_first=False): dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first) return dataframe df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv') df = df_.copy() date_columns = df.columns[df.columns.str.contains('date')] df[date_columns] = df[date_columns].apply(pd.to_datetime) def high_correlated_cols(dataframe, plot=False, corr_th=0.9): corr = dataframe.corr() cor_matrix = corr.abs() upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool)) drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)] if plot: import seaborn as sns import matplotlib.pyplot as plt sns.set(rc={'figure.figsize': (15, 15)}) return drop_list high_correlated_cols(df, plot=True) df['last_order_date'].max() analysis_date = dt.datetime(2021, 6, 1) df['recency'] = (analysis_date - df['last_order_date']).astype('timedelta64[D]') df['tenure'] = (df['last_order_date'] - df['first_order_date']).astype('timedelta64[D]') model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 
'recency', 'tenure']] def check_skew(df_skew, column): skew = stats.skew(df_skew[column]) skewtest = stats.skewtest(df_skew[column]) plt.title('Distribution of ' + column) sns.histplot(df_skew[column], color='g') print("{}'s: Skew: {}, : {}".format(column, skew, skewtest)) return plt.figure(figsize=(9, 9)) plt.subplot(6, 1, 1) check_skew(model_df, 'order_num_total_ever_online') plt.subplot(6, 1, 2) check_skew(model_df, 'order_num_total_ever_offline') plt.subplot(6, 1, 3) check_skew(model_df, 'customer_value_total_ever_offline') plt.subplot(6, 1, 4) check_skew(model_df, 'customer_value_total_ever_online') plt.subplot(6, 1, 5) check_skew(model_df, 'recency') plt.subplot(6, 1, 6) check_skew(model_df, 'tenure') plt.tight_layout() plt.savefig('before_transform.png', format='png', dpi=1000) plt.show()
code
104131002/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns import seaborn as sns import seaborn as sns # Functions def check_df(dataframe, head=5): print("##################### Shape #####################") print(dataframe.shape) print("##################### Types #####################") print(dataframe.dtypes) print("##################### Head #####################") print(dataframe.head(head)) print("##################### Tail #####################") print(dataframe.tail(head)) print("##################### NA #####################") print(dataframe.isnull().sum()) print("##################### Quantiles #####################") print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T) def cat_summary(dataframe, col_name, plot=False): print(pd.DataFrame({col_name: dataframe[col_name].value_counts(), "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)})) print("##########################################") if plot: sns.countplot(x=dataframe[col_name], data=dataframe) plt.show(block=True) def num_summary(dataframe, numerical_col, plot=False): quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99] print(dataframe[numerical_col].describe(quantiles).T) if plot: dataframe[numerical_col].hist(bins=20) plt.xlabel(numerical_col) plt.title(numerical_col) plt.show(block=True) def target_summary_with_num(dataframe, target, numerical_col): print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n") def target_summary_with_cat(dataframe, target, categorical_col): print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n") def correlation_matrix(df, cols): fig = plt.gcf() fig.set_size_inches(10, 8) plt.xticks(fontsize=10) plt.yticks(fontsize=10) fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu') plt.show(block=True) def grab_col_names(dataframe, cat_th=10, car_th=20): """ Veri setindeki kategorik, numerik ve kategorik fakat kardinal değişkenlerin isimlerini verir. Not: Kategorik değişkenlerin içerisine numerik görünümlü kategorik değişkenler de dahildir. Parameters ------ dataframe: dataframe Değişken isimleri alınmak istenilen dataframe cat_th: int, optional numerik fakat kategorik olan değişkenler için sınıf eşik değeri car_th: int, optinal kategorik fakat kardinal değişkenler için sınıf eşik değeri Returns ------ cat_cols: list Kategorik değişken listesi num_cols: list Numerik değişken listesi cat_but_car: list Kategorik görünümlü kardinal değişken listesi Examples ------ import seaborn as sns df = sns.load_dataset("iris") print(grab_col_names(df)) Notes ------ cat_cols + num_cols + cat_but_car = toplam değişken sayısı num_but_cat cat_cols'un içerisinde. 
Return olan 3 liste toplamı toplam değişken sayısına eşittir: cat_cols + num_cols + cat_but_car = değişken sayısı """ # cat_cols, cat_but_car cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"] num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"] cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"] cat_cols = cat_cols + num_but_cat cat_cols = [col for col in cat_cols if col not in cat_but_car] # num_cols num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"] num_cols = [col for col in num_cols if col not in num_but_cat] # print(f"Observations: {dataframe.shape[0]}") # print(f"Variables: {dataframe.shape[1]}") # print(f'cat_cols: {len(cat_cols)}') # print(f'num_cols: {len(num_cols)}') # print(f'cat_but_car: {len(cat_but_car)}') # print(f'num_but_cat: {len(num_but_cat)}') return cat_cols, num_cols, cat_but_car def missing_values_table(dataframe, na_name=False): na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0] n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False) ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False) missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio']) print(missing_df, end="\n") if na_name: return na_columns def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75): quartile1 = dataframe[col_name].quantile(q1) quartile3 = dataframe[col_name].quantile(q3) interquantile_range = quartile3 - quartile1 up_limit = quartile3 + 1.5 * interquantile_range low_limit = quartile1 - 1.5 * interquantile_range return low_limit, up_limit def replace_with_thresholds(dataframe, variable): low_limit, up_limit = outlier_thresholds(dataframe, variable) dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit def check_outlier(dataframe, col_name, q1=0.25, q3=0.75): low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3) if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): return True else: return False def one_hot_encoder(dataframe, categorical_cols, drop_first=False): dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first) return dataframe df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv') df = df_.copy() date_columns = df.columns[df.columns.str.contains('date')] df[date_columns] = df[date_columns].apply(pd.to_datetime) def high_correlated_cols(dataframe, plot=False, corr_th=0.9): corr = dataframe.corr() cor_matrix = corr.abs() upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool)) drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)] if plot: import seaborn as sns import matplotlib.pyplot as plt sns.set(rc={'figure.figsize': (15, 15)}) sns.heatmap(corr, cmap='RdBu') plt.show() return drop_list high_correlated_cols(df, plot=True)
code
104131002/cell_6
[ "text_html_output_1.png" ]
!pip install xgboost
!pip install lightgbm
!pip install catboost
import numpy as np
import datetime as dt
import pandas as pd
import seaborn as sns
from scipy import stats
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import model_selection
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeCV, LassoCV, ElasticNetCV
from warnings import filterwarnings
filterwarnings('ignore')
!pip install missingno
import missingno as msno
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix, classification_report, plot_roc_curve
from sklearn.model_selection import train_test_split, cross_validate
import warnings
warnings.simplefilter(action='ignore')
import xgboost
from sklearn.impute import KNNImputer
from sklearn import preprocessing
from sklearn.neighbors import LocalOutlierFactor
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, Lasso, LassoCV
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
import random
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from yellowbrick.cluster import KElbowVisualizer
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.preprocessing import LabelEncoder
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 500)
code
104131002/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical and categorical-but-cardinal variables in the dataset.
    Note: numeric-looking categorical variables are also included among the categorical variables.

    Parameters
    ------
        dataframe: dataframe
                The dataframe from which variable names are to be taken
        cat_th: int, optional
                class threshold value for variables that are numeric but categorical
        car_th: int, optional
                class threshold value for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
                List of categorical variables
        num_cols: list
                List of numerical variables
        cat_but_car: list
                List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables
        num_but_cat is contained within cat_cols.

        The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables

    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquantile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquantile_range
    low_limit = quartile1 - 1.5 * interquantile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)

def high_correlated_cols(dataframe, plot=False, corr_th=0.9):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        import seaborn as sns
        import matplotlib.pyplot as plt
        sns.set(rc={'figure.figsize': (15, 15)})
    return drop_list

high_correlated_cols(df, plot=True)
df['last_order_date'].max()
analysis_date = dt.datetime(2021, 6, 1)
df['recency'] = (analysis_date - df['last_order_date']).astype('timedelta64[D]')
df['tenure'] = (df['last_order_date'] - df['first_order_date']).astype('timedelta64[D]')
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online',
'recency', 'tenure']] model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online']) model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline']) model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline']) model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online']) model_df['recency'] = np.log1p(model_df['recency']) model_df['tenure'] = np.log1p(model_df['tenure']) model_df.head()
code
104131002/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical and categorical-but-cardinal variables in the dataset.
    Note: numeric-looking categorical variables are also included among the categorical variables.

    Parameters
    ------
        dataframe: dataframe
                The dataframe from which variable names are to be taken
        cat_th: int, optional
                class threshold value for variables that are numeric but categorical
        car_th: int, optional
                class threshold value for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
                List of categorical variables
        num_cols: list
                List of numerical variables
        cat_but_car: list
                List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables
        num_but_cat is contained within cat_cols.

        The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables

    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquantile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquantile_range
    low_limit = quartile1 - 1.5 * interquantile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
df.info()
code
104131002/cell_8
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical and categorical-but-cardinal variables in the dataset.
    Note: numeric-looking categorical variables are also included among the categorical variables.

    Parameters
    ------
        dataframe: dataframe
                The dataframe from which variable names are to be taken
        cat_th: int, optional
                class threshold value for variables that are numeric but categorical
        car_th: int, optional
                class threshold value for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
                List of categorical variables
        num_cols: list
                List of numerical variables
        cat_but_car: list
                List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables
        num_but_cat is contained within cat_cols.

        The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables

    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquantile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquantile_range
    low_limit = quartile1 - 1.5 * interquantile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
df.head()
code
104131002/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# Functions
def check_df(dataframe, head=5):
    print("##################### Shape #####################")
    print(dataframe.shape)
    print("##################### Types #####################")
    print(dataframe.dtypes)
    print("##################### Head #####################")
    print(dataframe.head(head))
    print("##################### Tail #####################")
    print(dataframe.tail(head))
    print("##################### NA #####################")
    print(dataframe.isnull().sum())
    print("##################### Quantiles #####################")
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)

def cat_summary(dataframe, col_name, plot=False):
    print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),
                        "Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe)}))
    print("##########################################")
    if plot:
        sns.countplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)

def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=20)
        plt.xlabel(numerical_col)
        plt.title(numerical_col)
        plt.show(block=True)

def target_summary_with_num(dataframe, target, numerical_col):
    print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")

def target_summary_with_cat(dataframe, target, categorical_col):
    print(pd.DataFrame({"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}), end="\n\n\n")

def correlation_matrix(df, cols):
    fig = plt.gcf()
    fig.set_size_inches(10, 8)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    fig = sns.heatmap(df[cols].corr(), annot=True, linewidths=0.5, annot_kws={'size': 12}, linecolor='w', cmap='RdBu')
    plt.show(block=True)

def grab_col_names(dataframe, cat_th=10, car_th=20):
    """
    Returns the names of the categorical, numerical and categorical-but-cardinal variables in the dataset.
    Note: numeric-looking categorical variables are also included among the categorical variables.

    Parameters
    ------
        dataframe: dataframe
                The dataframe from which variable names are to be taken
        cat_th: int, optional
                class threshold value for variables that are numeric but categorical
        car_th: int, optional
                class threshold value for variables that are categorical but cardinal

    Returns
    ------
        cat_cols: list
                List of categorical variables
        num_cols: list
                List of numerical variables
        cat_but_car: list
                List of categorical-looking cardinal variables

    Examples
    ------
        import seaborn as sns
        df = sns.load_dataset("iris")
        print(grab_col_names(df))

    Notes
    ------
        cat_cols + num_cols + cat_but_car = total number of variables
        num_but_cat is contained within cat_cols.

        The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables

    """
    # cat_cols, cat_but_car
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"]
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"]
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]

    # num_cols
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
    num_cols = [col for col in num_cols if col not in num_but_cat]

    # print(f"Observations: {dataframe.shape[0]}")
    # print(f"Variables: {dataframe.shape[1]}")
    # print(f'cat_cols: {len(cat_cols)}')
    # print(f'num_cols: {len(num_cols)}')
    # print(f'cat_but_car: {len(cat_but_car)}')
    # print(f'num_but_cat: {len(num_but_cat)}')
    return cat_cols, num_cols, cat_but_car

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns

def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
    quartile1 = dataframe[col_name].quantile(q1)
    quartile3 = dataframe[col_name].quantile(q3)
    interquantile_range = quartile3 - quartile1
    up_limit = quartile3 + 1.5 * interquantile_range
    low_limit = quartile1 - 1.5 * interquantile_range
    return low_limit, up_limit

def replace_with_thresholds(dataframe, variable):
    low_limit, up_limit = outlier_thresholds(dataframe, variable)
    dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit

def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):
        return True
    else:
        return False

def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
    return dataframe

df_ = pd.read_csv('../input/customer-segmentation-with-unsupervised-learning/flo_data_20k.csv')
df = df_.copy()
cat_cols, num_cols, cat_but_car = grab_col_names(df)  # derive the column lists so cat_cols is defined before the loop below
for col in cat_cols:
    cat_summary(df, col)
code