path: string (length 13 to 17)
screenshot_names: sequence (1 to 873 items)
code: string (length 0 to 40.4k)
cell_type: string (1 class)
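The rows that follow pair each notebook cell (path) with the screenshots of its output and its accumulated source code. As a rough illustration only — the container format and the file name below are assumptions, not part of the dataset description — records with this schema could be iterated like so:

import json

# Hypothetical file name; the dataset may actually ship as Parquet/Arrow rather
# than JSON Lines, but the field names follow the schema above.
with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        # Each record: a cell identifier, the screenshots of its output,
        # the cell's source code, and its cell_type.
        print(record['path'], record['cell_type'], len(record['screenshot_names']))
        print(record['code'][:80])  # preview of the cell source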
90103033/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = df[df.left == 1]
left.shape
retained = df[df.left == 0]
retained.shape
df.groupby('left').mean()
df_new = df[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
dummy_salary = pd.get_dummies(df_new.salary, prefix='salary')
df_new_with_dummy = pd.concat([df_new, dummy_salary], axis='columns')
df_new_with_dummy.drop('salary', axis='columns', inplace=True)
df_new_with_dummy.head()
code
90103033/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = df[df.left == 1]
left.shape
retained = df[df.left == 0]
retained.shape
df.groupby('left').mean()
df_new = df[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
df_new.head()
code
90103033/cell_22
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

# X_train, y_train and X_test are defined in an earlier cell of this notebook (not shown here).
model = LogisticRegression()
model.fit(X_train, y_train)
model.predict(X_test)
code
90103033/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = df[df.left == 1]
left.shape
retained = df[df.left == 0]
retained.shape
df.groupby('left').mean()
pd.crosstab(df.salary, df.left).plot(kind='bar')
code
90103033/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = df[df.left == 1]
left.shape
retained = df[df.left == 0]
retained.shape
df.groupby('left').mean()
pd.crosstab(df.Department, df.left).plot(kind='bar')
code
90103033/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = df[df.left == 1]
left.shape
code
16162987/cell_6
[ "text_plain_output_1.png" ]
import numpy as np

f = open('../input/glove840b300dtxt/glove.840B.300d.txt', encoding='utf-8')
embeddings_index = {}
for line in f:
    values = line.split()
    word = ''.join(values[:-300])
    coefs = np.asarray(values[-300:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found {} word vectors of glove.'.format(len(embeddings_index)))
code
18159032/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy import stats
from scipy.stats import norm, skew  # To compute statistic metrics
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them

# Deleting outliers
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

# Check the scatter plot one more time
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

## TARGET VARIABLE
# Let's analyse the output variable: SalePrice
sns.distplot(train['SalePrice'], fit=norm)
# fit argument to get the parameters of the fitting normal distribution
(mu, sigma) = norm.fit(train['SalePrice'])
print('mean: ', mu, ' sigma: ', sigma)
# Plot the corresponding normal distribution
plt.legend(['Normal dist. (mu = %r, sigma = %r)' % (mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.xlabel('SalePrice distribution')
# Also make the QQ-plot
fig = plt.figure()
stats.probplot(train['SalePrice'], plot=plt)
plt.show()
# -> SalePrice is right skewed (asymmetry of the probability distribution)
# 'Linear models love normally distributed data', thus transform it to make it more Gaussian
train['SalePrice'] = np.log1p(train['SalePrice'])
mu, sigma = norm.fit(train['SalePrice'])

m_train = train.shape[0]
m_test = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
all_data_na = all_data.isnull().sum() / len(all_data) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio': all_data_na})
missing_data
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='90')
sns.barplot(x=all_data_na.index, y=all_data_na)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percentage of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
code
18159032/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy import stats
from scipy.stats import norm, skew  # To compute statistic metrics
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them

# Deleting outliers
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

# Check the scatter plot one more time
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

sns.distplot(train['SalePrice'], fit=norm)
mu, sigma = norm.fit(train['SalePrice'])
print('mean: ', mu, ' sigma: ', sigma)
plt.legend(['Normal dist. (mu = %r, sigma = %r)' % (mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.xlabel('SalePrice distribution')
fig = plt.figure()
stats.probplot(train['SalePrice'], plot=plt)
plt.show()
code
18159032/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head()
code
18159032/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
code
18159032/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them

# Deleting outliers
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

# Check the scatter plot one more time
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

m_train = train.shape[0]
m_test = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print('Shape of all_data: ', all_data.shape)
code
18159032/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

fig, ax = plt.subplots()
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
code
18159032/cell_8
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

fig, ax = plt.subplots()
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
code
18159032/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
18159032/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy import stats
from scipy.stats import norm, skew  # To compute statistic metrics
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them

# Deleting outliers
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

# Check the scatter plot one more time
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

## TARGET VARIABLE
# Let's analyse the output variable: SalePrice
sns.distplot(train['SalePrice'], fit=norm)
# fit argument to get the parameters of the fitting normal distribution
(mu, sigma) = norm.fit(train['SalePrice'])
print('mean: ', mu, ' sigma: ', sigma)
# Plot the corresponding normal distribution
plt.legend(['Normal dist. (mu = %r, sigma = %r)' % (mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.xlabel('SalePrice distribution')
# Also make the QQ-plot
fig = plt.figure()
stats.probplot(train['SalePrice'], plot=plt)
plt.show()
# -> SalePrice is right skewed (asymmetry of the probability distribution)
# 'Linear models love normally distributed data', thus transform it to make it more Gaussian
train['SalePrice'] = np.log1p(train['SalePrice'])

sns.distplot(train['SalePrice'], fit=norm)
mu, sigma = norm.fit(train['SalePrice'])
print('Mean: ', mu, ' Sigma: ', sigma)
plt.legend(['Normal dist. (mu = %r, sigma = %r)' % (mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.xlabel('SalePrice distribution')
code
18159032/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt  # Graphs
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)

# Let's find out the outliers
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# -> high correlation between the two columns
# Outliers at the bottom right, we can delete them

# Deleting outliers
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)

# Check the scatter plot one more time
fig, ax = plt.subplots()
# Scatter plot
ax.scatter(x=train['GrLivArea'], y=train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

m_train = train.shape[0]
m_test = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
all_data_na = all_data.isnull().sum() / len(all_data) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio': all_data_na})
missing_data
code
18159032/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

print('The TRAIN data size before dropping ID feature is: ', train.shape)
print('The TEST data size before dropping ID feature is: ', test.shape)
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
print('The TRAIN data size after dropping ID feature is: ', train.shape)
print('The TEST data size after dropping ID feature is: ', test.shape)
code
32063106/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.boxplot(x='Year', y='AveragePrice', data=df)
code
32063106/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
code
32063106/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.head()
code
32063106/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
code
32063106/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.scatterplot(x='Month', y='AveragePrice', data=df)
code
32063106/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.distplot(df['Total Volume'])
code
32063106/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.distplot(df['AveragePrice'])
code
32063106/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.countplot(x='type', data=df)
code
32063106/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
code
32063106/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.barplot(x='type', y='AveragePrice', data=df)
code
32063106/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.head()
code
32063106/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.barplot(x='type', y='Total Volume', data=df)
code
32063106/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.tail(20)
code
32063106/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
df.region.unique()
df.region.value_counts()
sns.boxplot(x='Month', y='AveragePrice', data=df)
code
32063106/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.drop(columns='year', inplace=True)
df[['Year', 'Month', 'day']] = df.Date.str.split('-', expand=True)
df.drop(columns='Date', inplace=True)
df.drop_duplicates(inplace=True)
df.shape
code
32063106/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/avocado-prices/avocado.csv')
df.shape
df.info()
code
2040421/cell_4
[ "text_html_output_1.png" ]
import datetime
import numpy as np
import pandas as pd

def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
    features = list([])
    for a in groupcolumns:
        features.append(a)
    if columnName is not None:
        features.append(columnName)
    grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
    grpCount = grpCount[grpCount.Count >= cut]
    grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
    grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
    grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
    grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
    grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
    grpOutcomes = grpCount.merge(grpMean, on=features)
    grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
    grpOutcomes = grpOutcomes.merge(grpMin, on=features)
    grpOutcomes = grpOutcomes.merge(grpMax, on=features)
    grpOutcomes = grpOutcomes.merge(grpStd, on=features)
    x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
    x['Outcomes'] = data2['visitors'].values
    if useLOO:
        nonnulls = ~x.Count.isnull()
        x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
        x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
        if addNoise is True:
            x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
            x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
        else:
            x.loc[nonnulls, 'Count'] -= 1
            x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
            x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
    x.Count = np.log1p(x.Count)
    return x.fillna(x.mean())[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]

def MungeTrain():
    air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = air_visit_data.merge(air_store_info, on='air_store_id')
    train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    train = train.merge(store_id_relation, on='air_store_id', how='left')
    train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
    train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    train = train.merge(date_info, on='visit_date', how='left')
    train['year'] = train.visit_date.dt.year
    train['month'] = train.visit_date.dt.month
    train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
    train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
    train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
    train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
    train.visitors = np.log1p(train.visitors)
    train.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = train.fillna(-1)
    train = train.sort_values(by='visit_date')
    return train

def MungeTest(columns):
    air_visit_data = pd.read_csv('../input/sample_submission.csv')
    air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
    air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    test = air_visit_data.merge(air_store_info, on='air_store_id')
    test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    test = test.merge(store_id_relation, on='air_store_id', how='left')
    test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
    test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    test = test.merge(date_info, on='visit_date', how='left')
    test['year'] = test.visit_date.dt.year
    test['month'] = test.visit_date.dt.month
    test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
    test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
    test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
    test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
    test = test.fillna(-1)
    test = test.sort_values(by='visit_date')
    test.visitors = np.log1p(test.visitors)
    return test[list(['id']) + list(columns)]

train = MungeTrain()
test = MungeTest(train.columns)
train.head()
code
2040421/cell_7
[ "text_plain_output_1.png" ]
import datetime
import numpy as np
import pandas as pd

def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
    features = list([])
    for a in groupcolumns:
        features.append(a)
    if columnName is not None:
        features.append(columnName)
    grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
    grpCount = grpCount[grpCount.Count >= cut]
    grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
    grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
    grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
    grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
    grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
    grpOutcomes = grpCount.merge(grpMean, on=features)
    grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
    grpOutcomes = grpOutcomes.merge(grpMin, on=features)
    grpOutcomes = grpOutcomes.merge(grpMax, on=features)
    grpOutcomes = grpOutcomes.merge(grpStd, on=features)
    x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
    x['Outcomes'] = data2['visitors'].values
    if useLOO:
        nonnulls = ~x.Count.isnull()
        x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
        x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
        if addNoise is True:
            x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
            x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
        else:
            x.loc[nonnulls, 'Count'] -= 1
            x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
            x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
    x.Count = np.log1p(x.Count)
    return x.fillna(x.mean())[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]

def MungeTrain():
    air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = air_visit_data.merge(air_store_info, on='air_store_id')
    train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    train = train.merge(store_id_relation, on='air_store_id', how='left')
    train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
    train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    train = train.merge(date_info, on='visit_date', how='left')
    train['year'] = train.visit_date.dt.year
    train['month'] = train.visit_date.dt.month
    train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
    train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
    train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
    train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
    train.visitors = np.log1p(train.visitors)
    train.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = train.fillna(-1)
    train = train.sort_values(by='visit_date')
    return train

def MungeTest(columns):
    air_visit_data = pd.read_csv('../input/sample_submission.csv')
    air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
    air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    test = air_visit_data.merge(air_store_info, on='air_store_id')
    test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    test = test.merge(store_id_relation, on='air_store_id', how='left')
    test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
    test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    test = test.merge(date_info, on='visit_date', how='left')
    test['year'] = test.visit_date.dt.year
    test['month'] = test.visit_date.dt.month
    test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
    test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
    test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
    test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
    test = test.fillna(-1)
    test = test.sort_values(by='visit_date')
    test.visitors = np.log1p(test.visitors)
    return test[list(['id']) + list(columns)]

train = MungeTrain()
test = MungeTest(train.columns)
twoweeks = train.visit_date.max() - pd.Timedelta(days=14)
vistrain = train[train.visit_date < twoweeks].copy()
blindtrain = train[train.visit_date >= twoweeks].copy()
print(vistrain.shape)
print(blindtrain.shape)
code
2040421/cell_8
[ "text_plain_output_1.png" ]
import datetime
import numpy as np
import pandas as pd

def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
    features = list([])
    for a in groupcolumns:
        features.append(a)
    if columnName is not None:
        features.append(columnName)
    grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
    grpCount = grpCount[grpCount.Count >= cut]
    grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
    grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
    grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
    grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
    grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
    grpOutcomes = grpCount.merge(grpMean, on=features)
    grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
    grpOutcomes = grpOutcomes.merge(grpMin, on=features)
    grpOutcomes = grpOutcomes.merge(grpMax, on=features)
    grpOutcomes = grpOutcomes.merge(grpStd, on=features)
    x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
    x['Outcomes'] = data2['visitors'].values
    if useLOO:
        nonnulls = ~x.Count.isnull()
        x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
        x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
        if addNoise is True:
            x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
            x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
        else:
            x.loc[nonnulls, 'Count'] -= 1
            x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
            x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
    x.Count = np.log1p(x.Count)
    return x.fillna(x.mean())[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]

def MungeTrain():
    air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = air_visit_data.merge(air_store_info, on='air_store_id')
    train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    train = train.merge(store_id_relation, on='air_store_id', how='left')
    train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
    train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    train = train.merge(date_info, on='visit_date', how='left')
    train['year'] = train.visit_date.dt.year
    train['month'] = train.visit_date.dt.month
    train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
    train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
    train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
    train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
    train.visitors = np.log1p(train.visitors)
    train.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = train.fillna(-1)
    train = train.sort_values(by='visit_date')
    return train

def MungeTest(columns):
    air_visit_data = pd.read_csv('../input/sample_submission.csv')
    air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
    air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    test = air_visit_data.merge(air_store_info, on='air_store_id')
    test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    test = test.merge(store_id_relation, on='air_store_id', how='left')
    test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
    test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    test = test.merge(date_info, on='visit_date', how='left')
    test['year'] = test.visit_date.dt.year
    test['month'] = test.visit_date.dt.month
    test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
    test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
    test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
    test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
    test = test.fillna(-1)
    test = test.sort_values(by='visit_date')
    test.visitors = np.log1p(test.visitors)
    return test[list(['id']) + list(columns)]

train = MungeTrain()
test = MungeTest(train.columns)
twoweeks = train.visit_date.max() - pd.Timedelta(days=14)
vistrain = train[train.visit_date < twoweeks].copy()
blindtrain = train[train.visit_date >= twoweeks].copy()

features = ['day_of_week', 'holiday_flg', 'year', 'month']
for c in features:
    print(c)
    test[c + '_Count_Store'] = np.nan
    test[c + '_Mean_Store'] = np.nan
    test[c + '_Median_Store'] = np.nan
    test[c + '_Max_Store'] = np.nan
    test[c + '_Min_Store'] = np.nan
    test[c + '_Std_Store'] = np.nan
    vistrain[c + '_Count_Store'] = np.nan
    vistrain[c + '_Mean_Store'] = np.nan
    vistrain[c + '_Median_Store'] = np.nan
    vistrain[c + '_Max_Store'] = np.nan
    vistrain[c + '_Min_Store'] = np.nan
    vistrain[c + '_Std_Store'] = np.nan
    blindtrain[c + '_Count_Store'] = np.nan
    blindtrain[c + '_Mean_Store'] = np.nan
    blindtrain[c + '_Median_Store'] = np.nan
    blindtrain[c + '_Max_Store'] = np.nan
    blindtrain[c + '_Min_Store'] = np.nan
    blindtrain[c + '_Std_Store'] = np.nan
    test[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, test, list(['air_store_id']), c, useLOO=True, cut=5).values
    blindtrain[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, blindtrain, list(['air_store_id']), c, useLOO=True, cut=5).values
    vistrain[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, vistrain, list(['air_store_id']), c, useLOO=True, cut=5, addNoise=False).values

features = ['air_store_id', 'air_genre_name', 'air_area_name', 'hpg_store_id', 'hpg_genre_name', 'hpg_area_name', 'day_of_week', 'holiday_flg', 'year', 'month']
for c in features:
    print(c)
    test[c + '_Count'] = np.nan
    test[c + '_Mean'] = np.nan
    test[c + '_Median'] = np.nan
    test[c + '_Max'] = np.nan
    test[c + '_Min'] = np.nan
    test[c + '_Std'] = np.nan
    vistrain[c + '_Count'] = np.nan
    vistrain[c + '_Mean'] = np.nan
    vistrain[c + '_Median'] = np.nan
    vistrain[c + '_Max'] = np.nan
    vistrain[c + '_Min'] = np.nan
    vistrain[c + '_Std'] = np.nan
    blindtrain[c + '_Count'] = np.nan
    blindtrain[c + '_Mean'] = np.nan
    blindtrain[c + '_Median'] = np.nan
    blindtrain[c + '_Max'] = np.nan
    blindtrain[c + '_Min'] = np.nan
    blindtrain[c + '_Std'] = np.nan
    test[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), test.copy(), list([]), c, useLOO=False, cut=10).values
    blindtrain[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), blindtrain.copy(), list([]), c, useLOO=False, cut=10).values
    vistrain[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), vistrain.copy(), list([]), c, useLOO=True, cut=10, addNoise=False).values
    test.drop(c, inplace=True, axis=1)
    blindtrain.drop(c, inplace=True, axis=1)
    vistrain.drop(c, inplace=True, axis=1)
code
2040421/cell_5
[ "text_plain_output_1.png" ]
import datetime
import numpy as np
import pandas as pd

def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
    features = list([])
    for a in groupcolumns:
        features.append(a)
    if columnName is not None:
        features.append(columnName)
    grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
    grpCount = grpCount[grpCount.Count >= cut]
    grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
    grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
    grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
    grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
    grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
    grpOutcomes = grpCount.merge(grpMean, on=features)
    grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
    grpOutcomes = grpOutcomes.merge(grpMin, on=features)
    grpOutcomes = grpOutcomes.merge(grpMax, on=features)
    grpOutcomes = grpOutcomes.merge(grpStd, on=features)
    x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
    x['Outcomes'] = data2['visitors'].values
    if useLOO:
        nonnulls = ~x.Count.isnull()
        x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
        x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
        if addNoise is True:
            x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
            x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
        else:
            x.loc[nonnulls, 'Count'] -= 1
            x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
            x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
    x.Count = np.log1p(x.Count)
    return x.fillna(x.mean())[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]

def MungeTrain():
    air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = air_visit_data.merge(air_store_info, on='air_store_id')
    train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    train = train.merge(store_id_relation, on='air_store_id', how='left')
    train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
    train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    train = train.merge(date_info, on='visit_date', how='left')
    train['year'] = train.visit_date.dt.year
    train['month'] = train.visit_date.dt.month
    train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
    train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
    train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
    train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
    train.visitors = np.log1p(train.visitors)
    train.drop(['latitude', 'longitude'], inplace=True, axis=1)
    train = train.fillna(-1)
    train = train.sort_values(by='visit_date')
    return train

def MungeTest(columns):
    air_visit_data = pd.read_csv('../input/sample_submission.csv')
    air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
    air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
    air_store_info = pd.read_csv('../input/air_store_info.csv')
    hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
    air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
    air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
    hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
    store_id_relation = pd.read_csv('../input/store_id_relation.csv')
    date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
    air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
    hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
    test = air_visit_data.merge(air_store_info, on='air_store_id')
    test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
    test = test.merge(store_id_relation, on='air_store_id', how='left')
    test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
    test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
    test = test.merge(date_info, on='visit_date', how='left')
    test['year'] = test.visit_date.dt.year
    test['month'] = test.visit_date.dt.month
    test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
    test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
    test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
    test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
    test = test.fillna(-1)
    test = test.sort_values(by='visit_date')
    test.visitors = np.log1p(test.visitors)
    return test[list(['id']) + list(columns)]

train = MungeTrain()
test = MungeTest(train.columns)
test.head()
code
50210004/cell_9
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(-3)
code
50210004/cell_4
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(10)
code
50210004/cell_6
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(0.75)
code
50210004/cell_7
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(20)
code
50210004/cell_8
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(-1)
code
50210004/cell_10
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

def binary_sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    if value >= 0.5:
        value = 1
    else:
        value = 0
    return value

val_list = [1.1, 2.3, 3.5, 4.8, 5.6, -1.7, -2.3, -3.9, -4.5, -5.1]
for x in val_list:
    print(binary_sigmoid(x))
code
50210004/cell_5
[ "text_plain_output_1.png" ]
import math

def sigmoid(x):
    value = math.exp(x) / (1 + math.exp(x))
    return value

sigmoid(3)
code
90147270/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/ex-1-dataset-iris/iris.csv')
df.shape
df.loc[df['variety'] == 'Setosa']
df_Setosa = df.loc[df['variety'] == 'Setosa']
df_Virginica = df.loc[df['variety'] == 'Virginica']
df_Versicolor = df.loc[df['variety'] == 'Versicolor']
plt.scatter(df_Setosa['sepal.width'], np.zeros_like(df_Setosa['sepal.width']))
plt.scatter(df_Virginica['sepal.width'], np.zeros_like(df_Virginica['sepal.width']))
plt.scatter(df_Versicolor['sepal.width'], np.zeros_like(df_Versicolor['sepal.width']))
plt.xlabel('sepal.width')
plt.show()
code
90147270/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/ex-1-dataset-iris/iris.csv')
df.shape
df.loc[df['variety'] == 'Setosa']
df_Setosa = df.loc[df['variety'] == 'Setosa']
df_Virginica = df.loc[df['variety'] == 'Virginica']
df_Versicolor = df.loc[df['variety'] == 'Versicolor']
sns.FacetGrid(df, hue='variety', height=5).map(plt.scatter, 'sepal.width', 'petal.width').add_legend()
sns.pairplot(df, hue='variety', height=2)
code
90147270/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90147270/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/ex-1-dataset-iris/iris.csv')
df.head(150)
df.shape
code
90147270/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/ex-1-dataset-iris/iris.csv')
df.shape
df.loc[df['variety'] == 'Setosa']
df_Setosa = df.loc[df['variety'] == 'Setosa']
df_Virginica = df.loc[df['variety'] == 'Virginica']
df_Versicolor = df.loc[df['variety'] == 'Versicolor']
sns.FacetGrid(df, hue='variety', height=5).map(plt.scatter, 'sepal.width', 'petal.width').add_legend()
plt.show()
code
90140128/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') def detect_NaNs(df_temp: pd.DataFrame, name='', silent: bool=False, plot: bool=True): """ Detect NaNs in a provided dataframe and return the columns that NaNs were detected in Parameters ---------- df_temp : pd.DataFrame Dataframe to detect NaN values in name : str Name of the dataframe which helps give a more descriptive read out silent : bool Whether the print statements should fire plot : bool Whether to return a plot of the counts of NaNs in the data Returns ------- typing.List List of columns in the provided dataframe that contain NaN values """ count_nulls = df_temp.isnull().sum().sum() columns_with_NaNs = [] if count_nulls > 0: for col in df_temp.columns: if df_temp[col].isnull().sum().sum() > 0: columns_with_NaNs.append(col) return columns_with_NaNs nan_columns = detect_NaNs(df, 'Training Data')
code
90140128/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import typing df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') def detect_NaNs(df_temp: pd.DataFrame, name='', silent: bool=False, plot: bool=True): """ Detect NaNs in a provided dataframe and return the columns that NaNs were detected in Parameters ---------- df_temp : pd.DataFrame Dataframe to detect NaN values in name : str Name of the dataframe which helps give a more descriptive read out silent : bool Whether the print statements should fire plot : bool Whether to return a plot of the counts of NaNs in the data Returns ------- typing.List List of columns in the provided dataframe that contain NaN values """ count_nulls = df_temp.isnull().sum().sum() columns_with_NaNs = [] if count_nulls > 0: for col in df_temp.columns: if df_temp[col].isnull().sum().sum() > 0: columns_with_NaNs.append(col) return columns_with_NaNs nan_columns = detect_NaNs(df, 'Training Data') def fill_nans_create_columns(df_temp: pd.DataFrame, columns: typing.List, value: float=0): """ Fill NaN of provided columns and create columns to signify they weren't there. Parameters ---------- df_temp : pd.DataFrame Dataframe to modify columns : typing.List Columns of the provided dataframe to modify value : float Value to replace the NaN values with Returns ------- pd.DataFrame Modified Dataframe with NaNs filled and new columns signifying the rows that contained NaNs """ for col in columns: df_temp[col + '_was_null'] = df_temp[col].isnull().astype(int) df_temp[col] = df_temp[col].fillna(value) return df_temp df = fill_nans_create_columns(df, nan_columns, 0) test_df = fill_nans_create_columns(test_df, nan_columns, 0) def detect_duplicates(df_temp: pd.DataFrame): """ Detect duplicates in data and return the columns in which duplicates where detected. Parameters ---------- df_temp : pd.DataFrame Dataframe to detect duplicates in """ cols_to_use = [] id_cols = [] for col in df_temp.columns: if len(df_temp[col].unique()) != len(df_temp[col]): cols_to_use.append(col) else: id_cols.append(col) df_temp = df_temp.copy()[cols_to_use] count_dupes = df_temp.duplicated().sum() detect_duplicates(df)
code
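The fill_nans_create_columns helper above pairs each imputed column with a *_was_null indicator column before filling. A compact, self-contained sketch of that same missing-indicator pattern, using a tiny toy dataframe rather than the Titanic files:

import pandas as pd

def add_missing_indicators(df, columns, fill_value=0):
    # For each column: flag rows where the value was missing, then fill it.
    out = df.copy()
    for col in columns:
        out[col + '_was_null'] = out[col].isna().astype(int)
        out[col] = out[col].fillna(fill_value)
    return out

demo = pd.DataFrame({'Age': [22.0, None, 35.0]})
print(add_missing_indicators(demo, ['Age']))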
90140128/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') df.head()
code
104129202/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns pd.options.display.min_rows = 100 pd.options.display.max_rows = 100 plt.style.use('seaborn-whitegrid') plt.rc('figure', autolayout=True, titlesize=18, titleweight='bold') plt.rc('axes', labelweight='bold', labelsize='large', titleweight='bold', titlesize=16, titlepad=10) get_ipython().config.InlineBackend.figure_format = 'retina' leaderboard = pd.read_csv('../input/feedback-prize-efficiency-data/leaderboard.csv', index_col='EfficiencyRank') leaderboard.to_csv('full_leaderboard.csv') def is_pareto_efficient(costs, return_mask=True): """ Find the pareto-efficient points :param costs: An (n_points, n_costs) array :param return_mask: True to return a mask :return: An array of indices of pareto-efficient points. If return_mask is True, this will be an (n_points, ) boolean array Otherwise it will be a (n_efficient_points, ) integer array of indices. """ is_efficient = np.arange(costs.shape[0]) n_points = costs.shape[0] next_point_index = 0 while next_point_index < len(costs): nondominated_point_mask = np.any(costs < costs[next_point_index], axis=1) nondominated_point_mask[next_point_index] = True is_efficient = is_efficient[nondominated_point_mask] costs = costs[nondominated_point_mask] next_point_index = np.sum(nondominated_point_mask[:next_point_index]) + 1 if return_mask: is_efficient_mask = np.zeros(n_points, dtype=bool) is_efficient_mask[is_efficient] = True return is_efficient_mask else: return is_efficient max_runtime = 32400 min_logloss = 0.55435 def efficiency_score(score, scoring_time): return score / (np.log(3) - min_logloss) + scoring_time / max_runtime df_costs = leaderboard.loc[:, ['PrivateScore', 'ScoringTime']] df_costs['efficient'] = is_pareto_efficient(df_costs.assign(PrivateScore=lambda x: x['PrivateScore']).to_numpy()) df_costs['efficient'] = df_costs['efficient'].astype('category') fig, ax = plt.subplots(figsize=(7, 7), dpi=120) x = [min_logloss, np.log(3)] y = [0, max_runtime] xx, yy = np.meshgrid(np.linspace(x[0], x[1], 100), np.linspace(y[0], y[1], 100)) zz = efficiency_score(xx, yy) ax.contourf(xx, yy, zz, levels=25, alpha=0.5, zorder=1, cmap='magma') df_efficient = df_costs.astype({'efficient': bool}).query('efficient').sort_values(f'PrivateScore') ax.plot(df_efficient['PrivateScore'], df_efficient['ScoringTime'], '.-', color='C3', zorder=100, linewidth=1) hue_order = [True, False] colors = {True: 'C3', False: 'C0'} sns.scatterplot(x='PrivateScore', y='ScoringTime', hue='efficient', data=df_costs, ax=ax, palette=colors, hue_order=hue_order) ax.set_xlim(x[0] - 0.001, x[1] + 0.001) ax.set_ylim(y[0] - 10, y[1] + 10) ax.set_title('Efficiency Leaderboard') ax.grid([])
code
104129202/cell_3
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

pd.options.display.min_rows = 100
pd.options.display.max_rows = 100
plt.style.use('seaborn-whitegrid')
plt.rc('figure', autolayout=True, titlesize=18, titleweight='bold')
plt.rc('axes', labelweight='bold', labelsize='large', titleweight='bold', titlesize=16, titlepad=10)
get_ipython().config.InlineBackend.figure_format = 'retina'
leaderboard = pd.read_csv('../input/feedback-prize-efficiency-data/leaderboard.csv', index_col='EfficiencyRank')
leaderboard.to_csv('full_leaderboard.csv')
leaderboard.head(10)
code
33120194/cell_42
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) print(numeric_data.shape) print(categorical_data.shape)
code
33120194/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
train.shape
train.info()
code
33120194/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test.shape
code
33120194/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) train.describe()
code
33120194/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() sns.boxplot(target, color='turquoise')
code
33120194/cell_26
[ "text_html_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) train[missingdata_df].isna().sum().sort_values(ascending=False)[:20]
code
33120194/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) # Compute the correlation matrix corr = numeric_data.corr() # Generate a mask for the upper triangle mask = np.triu(np.ones_like(corr, dtype=np.bool)) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(11, 9)) plt.title('Correlation of Numeric Features',y=1,size=16) # Generate a custom diverging colormap cmap = sns.diverging_palette(220, 10, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) correlation = numeric_data.corr() k = 10 cols = correlation.nlargest(k, 'SalePrice')['SalePrice'].index print(cols) cm = np.corrcoef(train[cols].values.T) f, ax = plt.subplots(figsize=(14, 11)) plt.title('Correlation of 10 numerical features with highest R with Sales Price', y=1, size=16) sns.heatmap(cm, vmax=0.8, linewidths=0.01, square=True, annot=True, cmap='viridis', linecolor='black', xticklabels=cols.values, annot_kws={'size': 12}, yticklabels=cols.values)
code
33120194/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test.shape
test.info()
code
33120194/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33120194/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
train.shape
code
33120194/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test.shape
test.head()
code
33120194/cell_51
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) # Compute the correlation matrix corr = numeric_data.corr() # Generate a mask for the upper triangle mask = np.triu(np.ones_like(corr, dtype=np.bool)) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(11, 9)) plt.title('Correlation of Numeric Features',y=1,size=16) # Generate a custom diverging colormap cmap = sns.diverging_palette(220, 10, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) correlation = numeric_data.corr() k= 10 # just taking 10 column with largest correlation coefficients with sales price cols = correlation.nlargest(k,'SalePrice')['SalePrice'].index print(cols) cm = np.corrcoef(train[cols].values.T) f , ax = plt.subplots(figsize = (14,11)) plt.title('Correlation of 10 numerical features with highest R with Sales Price',y=1,size=16) sns.heatmap(cm, vmax=.8, linewidths=0.01,square=True,annot=True,cmap='viridis', linecolor="black",xticklabels = cols.values ,annot_kws = {'size':12},yticklabels = cols.values) sns.set() columns = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'GarageArea', 'TotalBsmtSF', '1stFlrSF', 'FullBath', 'TotRmsAbvGrd', 'YearBuilt'] sns.pairplot(train[columns], height=2, kind='scatter', diag_kind='kde') plt.show()
code
33120194/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) msno.bar(train[missingdata_df], color='turquoise', figsize=(30, 18))
code
33120194/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) sns.boxplot(train['SalePrice'], color='turquoise')
code
33120194/cell_47
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) # Compute the correlation matrix corr = numeric_data.corr() # Generate a mask for the upper triangle mask = np.triu(np.ones_like(corr, dtype=np.bool)) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(11, 9)) plt.title('Correlation of Numeric Features',y=1,size=16) # Generate a custom diverging colormap cmap = sns.diverging_palette(220, 10, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) correlation = numeric_data.corr() print(correlation['SalePrice'].sort_values(ascending=False), '\n')
code
33120194/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
train.shape
train.head()
code
33120194/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) sns.distplot(train['SalePrice'], color='turquoise')
code
33120194/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) numeric_data.head()
code
33120194/cell_31
[ "text_plain_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) msno.heatmap(train)
code
33120194/cell_46
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() train = train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'], axis=1) numeric_data = train.select_dtypes(include=[np.number]) categorical_data = train.select_dtypes(exclude=[np.number]) corr = numeric_data.corr() mask = np.triu(np.ones_like(corr, dtype=np.bool)) f, ax = plt.subplots(figsize=(11, 9)) plt.title('Correlation of Numeric Features', y=1, size=16) cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5})
code
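Several of the correlation-heatmap cells above build the upper-triangle mask with dtype=np.bool; that alias was deprecated in NumPy 1.20 and removed in 1.24, so the pattern fails on current NumPy. A minimal stand-alone version of the same masked-heatmap idea, run on synthetic data rather than the competition files:

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.random((100, 4)), columns=list('ABCD'))  # synthetic stand-in data
corr = df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))  # plain bool instead of the removed np.bool
sns.heatmap(corr, mask=mask, square=True, annot=True, cmap='viridis')
plt.show()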
33120194/cell_22
[ "text_html_output_1.png" ]
import missingno as msno import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df])
code
33120194/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import missingno as msno import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 20) test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.shape train = train.drop(['Id'], axis=1) missingdata_df = train.columns[train.isnull().any()].tolist() msno.matrix(train[missingdata_df]) target = np.log(train['SalePrice']) target.skew() plt.hist(target, color='turquoise')
code
32066191/cell_9
[ "image_output_1.png" ]
# `train` is the dataframe read from train.csv in an earlier cell of this notebook
junk = ['Id', 'Date', 'Province_State']
train.drop(junk, axis=1, inplace=True)
train.head()
code
32066191/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
train.tail()
code
32066191/cell_19
[ "text_plain_output_1.png" ]
from statsmodels.tsa.statespace.sarimax import SARIMAX import math import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) c = list() for i, x in enumerate(train['Province_State']): if x is not np.nan: c.append(x + ' - ' + train['Country_Region'][i]) else: c.append(train['Country_Region'][i]) junk = ['Id', 'Date', 'Province_State'] train.drop(junk, axis=1, inplace=True) end = 84 country_list = train['Country_Region'][0::end] def prep_data(train): X_train = train[train.ConfirmedCases > 0] X_train.reset_index(inplace=True, drop=True) train.reset_index(inplace=True, drop=True) return (X_train, train) def Calculate_Table(X_train): diff_conf, conf_old = ([], 0) diff_fat, fat_old = ([], 0) dd_conf, dc_old = ([], 0) dd_fat, df_old = ([], 0) ratios = [] for row in X_train.values: diff_conf.append(row[1] - conf_old) conf_old = row[1] diff_fat.append(row[2] - fat_old) fat_old = row[2] dd_conf.append(diff_conf[-1] - dc_old) dc_old = diff_conf[-1] dd_fat.append(diff_fat[-1] - df_old) df_old = diff_fat[-1] ratios.append(fat_old / conf_old) ratio = fat_old / conf_old return (diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio) def populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios): pd.options.mode.chained_assignment = None X_train['diff_confirmed'] = diff_conf X_train['diff_fatalities'] = diff_fat X_train['dd_confirmed'] = dd_conf X_train['dd_fatalities'] = dd_fat X_train['ratios'] = ratios return X_train def fill_nan(variable): if math.isnan(variable): return 0 else: return variable def Cal_Series_Avg(X_train, ratio): d_c = fill_nan(X_train.diff_confirmed[X_train.diff_confirmed != 0].mean()) dd_c = fill_nan(X_train.dd_confirmed[X_train.dd_confirmed != 0].mean()) d_f = fill_nan(X_train.diff_fatalities[X_train.diff_fatalities != 0].mean()) dd_f = fill_nan(X_train.dd_fatalities[X_train.dd_fatalities != 0].mean()) rate = fill_nan(X_train.ratios[X_train.ratios != 0].mean()) rate = max(rate, ratio) return (d_c, dd_c, d_f, dd_f, rate) def apply_taylor(train, d_c, dd_c, d_f, dd_f, rate, end): pred_c, pred_f = (list(train.ConfirmedCases.loc[end - 12:end - 1].astype(int)), list(train.Fatalities.loc[end - 12:end - 1].astype(int))) for i in range(1, 32): pred_c.append(int(train.ConfirmedCases[end - 1] + d_c * i + 0.5 * dd_c * i ** 2)) pred_f.append(pred_c[-1] * rate) return (pred_c, pred_f) def apply_taylor2(train, d_c, dd_c, d_f, dd_f, rate, end): pred_c, pred_f = (list(train.ConfirmedCases.loc[end - 12:end - 11].astype(int)), list(train.Fatalities.loc[end - 2:end - 1].astype(int))) for i in range(1, 42): pred_c.append(int(train.ConfirmedCases[end - 1] + d_c * i + 0.5 * dd_c * i ** 2)) pred_f.append(pred_c[-1] * rate) return (pred_c, pred_f) pc = [] pf = [] pc2 = [] pf2 = [] pcS = [] pfS = [] pred_c = [] pred_f = [] pred_c2 = [] pred_f2 = [] pred_c_S = [] pred_f_S = [] for i, country in enumerate(country_list): country_data = train[train['Country_Region'] == country] X_train, country_data = prep_data(country_data) if len(X_train) > 0: diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio = Calculate_Table(X_train) X_train = populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios) d_c, dd_c, d_f, dd_f, rate = Cal_Series_Avg(X_train, ratio) pred_c, pred_f = apply_taylor(country_data, d_c, dd_c, d_f, dd_f, rate, end) pred_c2, pred_f2 = apply_taylor2(country_data, d_c, dd_c, d_f, dd_f, rate, end) adj = end - len(X_train.ConfirmedCases) if end - 12 - adj > 
10 & len(X_train.ConfirmedCases) > 2: model = SARIMAX(X_train.ConfirmedCases, order=(1, 0, 0), trend='t') model_fit = model.fit() pred_c_S = list(model_fit.predict(len(X_train.ConfirmedCases), 113 - adj)) my = [] s = 43 - (114 - end) my.extend(pred_c2[0:s]) my.extend(pred_c_S) pred_c_S = my.copy() modelf = SARIMAX(X_train.Fatalities, order=(1, 0, 0), trend='t') modelf_fit = modelf.fit() pred_f_S = list(modelf_fit.predict(end - adj, 113 - adj)) my = [] my.extend(pred_f2[0:s]) my.extend(pred_f_S) pred_f_S = my.copy() else: pred_c_S = pred_c2 pred_f_S = pred_f2 else: pred_c = list(np.zeros(43)) pred_f = list(np.zeros(43)) pred_c2 = list(np.zeros(43)) pred_f2 = list(np.zeros(43)) pred_c_S = list(np.zeros(43)) pred_f_S = list(np.zeros(43)) pc += pred_c pf += pred_f pc2 += pred_c2 pf2 += pred_f2 pcS += pred_c_S pfS += pred_f_S (len(pc), len(pcS), len(pc2))
code
32066191/cell_18
[ "text_html_output_1.png" ]
from statsmodels.tsa.statespace.sarimax import SARIMAX import math import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) c = list() for i, x in enumerate(train['Province_State']): if x is not np.nan: c.append(x + ' - ' + train['Country_Region'][i]) else: c.append(train['Country_Region'][i]) junk = ['Id', 'Date', 'Province_State'] train.drop(junk, axis=1, inplace=True) end = 84 country_list = train['Country_Region'][0::end] def prep_data(train): X_train = train[train.ConfirmedCases > 0] X_train.reset_index(inplace=True, drop=True) train.reset_index(inplace=True, drop=True) return (X_train, train) def Calculate_Table(X_train): diff_conf, conf_old = ([], 0) diff_fat, fat_old = ([], 0) dd_conf, dc_old = ([], 0) dd_fat, df_old = ([], 0) ratios = [] for row in X_train.values: diff_conf.append(row[1] - conf_old) conf_old = row[1] diff_fat.append(row[2] - fat_old) fat_old = row[2] dd_conf.append(diff_conf[-1] - dc_old) dc_old = diff_conf[-1] dd_fat.append(diff_fat[-1] - df_old) df_old = diff_fat[-1] ratios.append(fat_old / conf_old) ratio = fat_old / conf_old return (diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio) def populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios): pd.options.mode.chained_assignment = None X_train['diff_confirmed'] = diff_conf X_train['diff_fatalities'] = diff_fat X_train['dd_confirmed'] = dd_conf X_train['dd_fatalities'] = dd_fat X_train['ratios'] = ratios return X_train def fill_nan(variable): if math.isnan(variable): return 0 else: return variable def Cal_Series_Avg(X_train, ratio): d_c = fill_nan(X_train.diff_confirmed[X_train.diff_confirmed != 0].mean()) dd_c = fill_nan(X_train.dd_confirmed[X_train.dd_confirmed != 0].mean()) d_f = fill_nan(X_train.diff_fatalities[X_train.diff_fatalities != 0].mean()) dd_f = fill_nan(X_train.dd_fatalities[X_train.dd_fatalities != 0].mean()) rate = fill_nan(X_train.ratios[X_train.ratios != 0].mean()) rate = max(rate, ratio) return (d_c, dd_c, d_f, dd_f, rate) def apply_taylor(train, d_c, dd_c, d_f, dd_f, rate, end): pred_c, pred_f = (list(train.ConfirmedCases.loc[end - 12:end - 1].astype(int)), list(train.Fatalities.loc[end - 12:end - 1].astype(int))) for i in range(1, 32): pred_c.append(int(train.ConfirmedCases[end - 1] + d_c * i + 0.5 * dd_c * i ** 2)) pred_f.append(pred_c[-1] * rate) return (pred_c, pred_f) def apply_taylor2(train, d_c, dd_c, d_f, dd_f, rate, end): pred_c, pred_f = (list(train.ConfirmedCases.loc[end - 12:end - 11].astype(int)), list(train.Fatalities.loc[end - 2:end - 1].astype(int))) for i in range(1, 42): pred_c.append(int(train.ConfirmedCases[end - 1] + d_c * i + 0.5 * dd_c * i ** 2)) pred_f.append(pred_c[-1] * rate) return (pred_c, pred_f) pc = [] pf = [] pc2 = [] pf2 = [] pcS = [] pfS = [] pred_c = [] pred_f = [] pred_c2 = [] pred_f2 = [] pred_c_S = [] pred_f_S = [] for i, country in enumerate(country_list): country_data = train[train['Country_Region'] == country] X_train, country_data = prep_data(country_data) if len(X_train) > 0: diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio = Calculate_Table(X_train) X_train = populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios) d_c, dd_c, d_f, dd_f, rate = Cal_Series_Avg(X_train, ratio) pred_c, pred_f = apply_taylor(country_data, d_c, dd_c, d_f, dd_f, rate, end) pred_c2, pred_f2 = apply_taylor2(country_data, d_c, dd_c, d_f, dd_f, rate, end) adj = end - len(X_train.ConfirmedCases) if end - 12 - adj > 
10 & len(X_train.ConfirmedCases) > 2: model = SARIMAX(X_train.ConfirmedCases, order=(1, 0, 0), trend='t') model_fit = model.fit() pred_c_S = list(model_fit.predict(len(X_train.ConfirmedCases), 113 - adj)) my = [] s = 43 - (114 - end) my.extend(pred_c2[0:s]) my.extend(pred_c_S) pred_c_S = my.copy() if i == 0: print(pred_c_S) print(country, len(pred_c_S), s) modelf = SARIMAX(X_train.Fatalities, order=(1, 0, 0), trend='t') modelf_fit = modelf.fit() pred_f_S = list(modelf_fit.predict(end - adj, 113 - adj)) my = [] my.extend(pred_f2[0:s]) my.extend(pred_f_S) pred_f_S = my.copy() else: pred_c_S = pred_c2 pred_f_S = pred_f2 else: pred_c = list(np.zeros(43)) pred_f = list(np.zeros(43)) pred_c2 = list(np.zeros(43)) pred_f2 = list(np.zeros(43)) pred_c_S = list(np.zeros(43)) pred_f_S = list(np.zeros(43)) pc += pred_c pf += pred_f pc2 += pred_c2 pf2 += pred_f2 pcS += pred_c_S pfS += pred_f_S
code
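The apply_taylor helpers above extrapolate the series as last + d_c*i + 0.5*dd_c*i**2, a second-order Taylor-style forecast built from average first and second differences. A simplified, self-contained sketch of that idea on plain lists, without the notebook's non-zero filtering or fatality ratio:

def taylor_extrapolate(series, steps):
    # Mean first and second differences stand in for the first two derivatives.
    d1 = [b - a for a, b in zip(series, series[1:])]
    d2 = [b - a for a, b in zip(d1, d1[1:])]
    d_mean = sum(d1) / len(d1)
    dd_mean = sum(d2) / len(d2) if d2 else 0.0
    last = series[-1]
    return [last + d_mean * i + 0.5 * dd_mean * i ** 2 for i in range(1, steps + 1)]

print(taylor_extrapolate([1, 2, 4, 7, 11], steps=3))  # [14.0, 18.0, 23.0]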
32066191/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
code
32066191/cell_10
[ "text_plain_output_1.png" ]
# `train` is the dataframe read from train.csv in an earlier cell of this notebook
junk = ['Id', 'Date', 'Province_State']
train.drop(junk, axis=1, inplace=True)
end = 84
country_list = train['Country_Region'][0::end]
print(len(country_list))
print(country_list)
code
32066191/cell_5
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra

# `train` is the dataframe read from train.csv in an earlier cell of this notebook
c = list()
for i, x in enumerate(train['Province_State']):
    if x is not np.nan:
        c.append(x + ' - ' + train['Country_Region'][i])
    else:
        c.append(train['Country_Region'][i])
print(len(c))
code
89134183/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.preprocessing import image from pathlib import Path import numpy as np # linear algebra import tensorflow as tf img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) import tensorflow as tf y = np.where(y[..., 0] > 0, 1, 0)[..., tf.newaxis] (X.shape, y.shape)
code
89134183/cell_9
[ "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_48.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_52.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_42.png", "text_plain_output_23.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_33.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_39.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_46.png" ]
from keras.preprocessing import image from pathlib import Path import numpy as np # linear algebra img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) print(np.sum(y[0, ..., 0] == y[0, ..., 1])) print(np.sum(y[0, ..., 1] == y[0, ..., 2]))
code
89134183/cell_25
[ "text_plain_output_1.png" ]
from IPython.display import clear_output from keras.layers import (Activation, Input, MaxPooling2D, BatchNormalization, from keras.models import Model from keras.preprocessing import image from pathlib import Path from tensorflow.keras.utils import plot_model import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import tensorflow as tf img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) import tensorflow as tf y = np.where(y[..., 0] > 0, 1, 0)[..., tf.newaxis] (X_train.shape, y_train.shape, X_test.shape, y_test.shape) sample_image = X_test[0] sample_mask = y_test[0] import keras from IPython.display import clear_output def show_predictions(model): pred_mask = model.predict(sample_image[None]) print(pred_mask.shape) fig, ax = plt.subplots(1, 3, figsize=(15, 8)) ax[0].imshow(sample_image) ax[1].imshow(sample_mask[..., 0]) ax[2].imshow(np.squeeze(pred_mask, axis=0)) plt.show() class DisplayCallback(keras.callbacks.Callback): def __init__(self, patience=1): super().__init__() self.patience = patience def on_train_begin(self, logs=None): self.wait = 0 def on_epoch_end(self, epoch, logs=None): self.wait += 1 if self.wait >= self.patience: clear_output(wait=True) show_predictions(self.model) print(f'\nSample Prediction after epoch {epoch+1}') self.wait = 0 from keras.models import Model from keras.layers import Activation, Input, MaxPooling2D, BatchNormalization, Conv2D, Conv2DTranspose, concatenate, Dropout from tensorflow.keras.utils import plot_model def Unet(num_classes=1, input_shape=(256, 256, 3), start_num_masks=64, deep=4, mask_size=(3, 3), conv_padding='same', input_layers=None, skip_connection=None, only_layers=False): if not input_layers: skip_connection = [] img = Input(shape=input_shape) current_num_masks = start_num_masks x = img for i in range(deep): x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv1')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv2')(x) x = BatchNormalization()(x) x = Activation('relu')(x) skip_connection.append(x) x = MaxPooling2D()(x) current_num_masks = current_num_masks * 2 current_num_masks = current_num_masks / 2 else: current_num_masks = sum([start_num_masks for _ in range(4)]) for layer in input_layers.layers[:-1]: layer.trainable = False x = input_layers.layers[-1].output img = input_layers.inputs for i in range(deep): x = Conv2DTranspose(current_num_masks, (2, 2), strides=(2, 2), padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = concatenate([x, skip_connection[-(i + 1)]]) x = Conv2D(current_num_masks, mask_size, padding='same')(x) x = BatchNormalization()(x) x = 
Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) current_num_masks = current_num_masks / 2 if only_layers: return (img, x) x = Conv2D(num_classes, (3, 3), activation='sigmoid', padding='same')(x) model = Model(img, x) return model model = Unet(start_num_masks=128, deep=5) plot_model(model, show_shapes=True)
code
89134183/cell_6
[ "image_output_1.png" ]
from keras.preprocessing import image from pathlib import Path import matplotlib.pyplot as plt img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) plt.imshow(masks[0].convert('RGBA'))
code
89134183/cell_1
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89134183/cell_16
[ "text_plain_output_1.png" ]
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
code
89134183/cell_24
[ "image_output_1.png" ]
from IPython.display import clear_output from keras.layers import (Activation, Input, MaxPooling2D, BatchNormalization, from keras.models import Model from keras.preprocessing import image from pathlib import Path import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import tensorflow as tf img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) import tensorflow as tf y = np.where(y[..., 0] > 0, 1, 0)[..., tf.newaxis] (X_train.shape, y_train.shape, X_test.shape, y_test.shape) sample_image = X_test[0] sample_mask = y_test[0] import keras from IPython.display import clear_output def show_predictions(model): pred_mask = model.predict(sample_image[None]) print(pred_mask.shape) fig, ax = plt.subplots(1, 3, figsize=(15, 8)) ax[0].imshow(sample_image) ax[1].imshow(sample_mask[..., 0]) ax[2].imshow(np.squeeze(pred_mask, axis=0)) plt.show() class DisplayCallback(keras.callbacks.Callback): def __init__(self, patience=1): super().__init__() self.patience = patience def on_train_begin(self, logs=None): self.wait = 0 def on_epoch_end(self, epoch, logs=None): self.wait += 1 if self.wait >= self.patience: clear_output(wait=True) show_predictions(self.model) print(f'\nSample Prediction after epoch {epoch+1}') self.wait = 0 from keras.models import Model from keras.layers import Activation, Input, MaxPooling2D, BatchNormalization, Conv2D, Conv2DTranspose, concatenate, Dropout from tensorflow.keras.utils import plot_model def Unet(num_classes=1, input_shape=(256, 256, 3), start_num_masks=64, deep=4, mask_size=(3, 3), conv_padding='same', input_layers=None, skip_connection=None, only_layers=False): if not input_layers: skip_connection = [] img = Input(shape=input_shape) current_num_masks = start_num_masks x = img for i in range(deep): x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv1')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv2')(x) x = BatchNormalization()(x) x = Activation('relu')(x) skip_connection.append(x) x = MaxPooling2D()(x) current_num_masks = current_num_masks * 2 current_num_masks = current_num_masks / 2 else: current_num_masks = sum([start_num_masks for _ in range(4)]) for layer in input_layers.layers[:-1]: layer.trainable = False x = input_layers.layers[-1].output img = input_layers.inputs for i in range(deep): x = Conv2DTranspose(current_num_masks, (2, 2), strides=(2, 2), padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = concatenate([x, skip_connection[-(i + 1)]]) x = Conv2D(current_num_masks, mask_size, padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, 
padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) current_num_masks = current_num_masks / 2 if only_layers: return (img, x) x = Conv2D(num_classes, (3, 3), activation='sigmoid', padding='same')(x) model = Model(img, x) return model model = Unet(start_num_masks=128, deep=5)
code
89134183/cell_27
[ "text_plain_output_1.png" ]
from IPython.display import clear_output from keras.layers import (Activation, Input, MaxPooling2D, BatchNormalization, from keras.models import Model from keras.preprocessing import image from pathlib import Path from tensorflow.keras.optimizers import Adam import keras import keras.backend as K import matplotlib.pyplot as plt import numpy as np # linear algebra import tensorflow as tf img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) import tensorflow as tf y = np.where(y[..., 0] > 0, 1, 0)[..., tf.newaxis] (X_train.shape, y_train.shape, X_test.shape, y_test.shape) import keras.backend as K def dice_coef(y_true, y_pred): return 2.0 * K.sum(y_true * y_pred) / (K.sum(y_true) + K.sum(y_pred)) sample_image = X_test[0] sample_mask = y_test[0] import keras from IPython.display import clear_output def show_predictions(model): pred_mask = model.predict(sample_image[None]) print(pred_mask.shape) fig, ax = plt.subplots(1, 3, figsize=(15, 8)) ax[0].imshow(sample_image) ax[1].imshow(sample_mask[..., 0]) ax[2].imshow(np.squeeze(pred_mask, axis=0)) plt.show() class DisplayCallback(keras.callbacks.Callback): def __init__(self, patience=1): super().__init__() self.patience = patience def on_train_begin(self, logs=None): self.wait = 0 def on_epoch_end(self, epoch, logs=None): self.wait += 1 if self.wait >= self.patience: clear_output(wait=True) show_predictions(self.model) print(f'\nSample Prediction after epoch {epoch+1}') self.wait = 0 from keras.models import Model from keras.layers import Activation, Input, MaxPooling2D, BatchNormalization, Conv2D, Conv2DTranspose, concatenate, Dropout from tensorflow.keras.utils import plot_model def Unet(num_classes=1, input_shape=(256, 256, 3), start_num_masks=64, deep=4, mask_size=(3, 3), conv_padding='same', input_layers=None, skip_connection=None, only_layers=False): if not input_layers: skip_connection = [] img = Input(shape=input_shape) current_num_masks = start_num_masks x = img for i in range(deep): x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv1')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, padding='same', name=f'bl_{i + 1}_conv2')(x) x = BatchNormalization()(x) x = Activation('relu')(x) skip_connection.append(x) x = MaxPooling2D()(x) current_num_masks = current_num_masks * 2 current_num_masks = current_num_masks / 2 else: current_num_masks = sum([start_num_masks for _ in range(4)]) for layer in input_layers.layers[:-1]: layer.trainable = False x = input_layers.layers[-1].output img = input_layers.inputs for i in range(deep): x = Conv2DTranspose(current_num_masks, (2, 2), strides=(2, 2), padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = 
concatenate([x, skip_connection[-(i + 1)]]) x = Conv2D(current_num_masks, mask_size, padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(current_num_masks, mask_size, padding='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) current_num_masks = current_num_masks / 2 if only_layers: return (img, x) x = Conv2D(num_classes, (3, 3), activation='sigmoid', padding='same')(x) model = Model(img, x) return model model = Unet(start_num_masks=128, deep=5) from tensorflow.keras.optimizers import Adam model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[dice_coef]) history = model.fit(X_train, y_train, epochs=40, batch_size=2, validation_data=(X_test, y_test), callbacks=[DisplayCallback(patience=5)])
code
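The dice_coef metric above computes 2*sum(y_true*y_pred) / (sum(y_true) + sum(y_pred)) with Keras backend ops. A NumPy sketch of the same coefficient is given below; the small smoothing term that guards against empty masks is an addition for illustration, not part of the cell above:

import numpy as np

def dice_coef_np(y_true, y_pred, smooth=1e-6):
    # Dice = 2*|A intersect B| / (|A| + |B|), computed on flattened binary masks.
    y_true = np.asarray(y_true, dtype=np.float32).ravel()
    y_pred = np.asarray(y_pred, dtype=np.float32).ravel()
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)

mask = np.array([[1, 0], [1, 1]])
print(dice_coef_np(mask, mask))      # ~1.0 for a perfect match
print(dice_coef_np(mask, 1 - mask))  # ~0.0 for a disjoint prediction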
89134183/cell_12
[ "image_output_1.png" ]
from keras.preprocessing import image from pathlib import Path import matplotlib.pyplot as plt import numpy as np # linear algebra import tensorflow as tf img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) X = [] for img in images: x = image.img_to_array(img) X.append(x) X = np.array(X) y = [] for mask in masks: y.append(image.img_to_array(mask)) y = np.array(y) import tensorflow as tf y = np.where(y[..., 0] > 0, 1, 0)[..., tf.newaxis] plt.imshow(y[0]) plt.show()
code
89134183/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.preprocessing import image from pathlib import Path import matplotlib.pyplot as plt img_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/CXR_png/') mask_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/masks') test_root_path = Path('../input/chest-xray-masks-and-labels/Lung Segmentation/test') img_paths = sorted(img_root_path.iterdir()) mask_paths = sorted(mask_root_path.iterdir()) test_paths = sorted(test_root_path.iterdir()) images = [] masks = [] test_images = [] image_size = (256, 256) for img, mask, test_img in zip(img_paths, mask_paths, test_paths): images.append(image.load_img(img, target_size=image_size)) masks.append(image.load_img(mask, target_size=image_size)) test_images.append(image.load_img(test_img, target_size=image_size)) plt.imshow(images[0].convert('RGBA')) plt.show()
code
121154806/cell_21
[ "text_plain_output_1.png" ]
from tqdm import tqdm import cv2 import gif2numpy import matplotlib.pyplot as plt import numpy as np import os import segmentation_models as sm import tensorflow as tf sm.set_framework('tf.keras') sm.framework() root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm') def Data_sorting(input_data, target_data, exts): images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))]) masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))]) return (images, masks) def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels): length = len(folder_path) X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8) y = np.zeros((length, img_height, img_width, 1), dtype=np.bool) if not is_mask: for id, fname in tqdm(enumerate(folder_path)): if fname.endswith('gif'): img = gif2numpy.convert(fname)[0][0] else: img = cv2.imread(fname) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (img_height, img_width)) X[id] = img return X if is_mask: for id, fname in tqdm(enumerate(folder_path)): if fname.endswith('gif'): img = gif2numpy.convert(fname)[0][0] else: img = cv2.imread(fname) img = cv2.resize(img, (img_height, img_width)) img = img[:, :, 0] img = np.expand_dims(img, axis=-1) y[id] = img return y IMG_HEIGHT = 512 IMG_WIDTH = 512 IMG_CHANNELS = 3 X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS) X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS) y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1) y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1) BATCH_SIZE = 5 EPOCHS = 400 N_CLASS = 1 ACTIVATION = 'sigmoid' CALLBACKS = [tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss')] def Model_Training(model_list, batch_size, epochs, callbacks): model_dict = {} for key, dict_i in model_list.items(): if dict_i['Train'] == True: model = dict_i['model'] model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy', sm.losses.bce_jaccard_loss, sm.metrics.iou_score]) model.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25, verbose=2, callbacks=callbacks) model_dict[key] = model return model_dict model_list = {'unet-efficientnetb0': {'model': sm.Unet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}, 'linknet-efficientnetb0': {'model': sm.Linknet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}} model_dict = Model_Training(model_list, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=CALLBACKS) model_dict
code
121154806/cell_9
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import gif2numpy
import matplotlib.pyplot as plt
plt.imshow(gif2numpy.convert(masks_drive_test[0])[0][0])
code
121154806/cell_23
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os
import segmentation_models as sm
import tensorflow as tf
sm.set_framework('tf.keras')
sm.framework()
root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=bool)  # builtin bool (np.bool was removed from NumPy)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
BATCH_SIZE = 5
EPOCHS = 400
N_CLASS = 1
ACTIVATION = 'sigmoid'
CALLBACKS = [tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss')]

def Model_Training(model_list, batch_size, epochs, callbacks):
    model_dict = {}
    for key, dict_i in model_list.items():
        if dict_i['Train'] == True:
            model = dict_i['model']
            model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy', sm.losses.bce_jaccard_loss, sm.metrics.iou_score])
            model.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25, verbose=2, callbacks=callbacks)
            model_dict[key] = model
    return model_dict

model_list = {'unet-efficientnetb0': {'model': sm.Unet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True},
              'linknet-efficientnetb0': {'model': sm.Linknet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}}
model_dict = Model_Training(model_list, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=CALLBACKS)
unet_effb0 = list(model_dict.items())[0][1]
linknet_effb0 = list(model_dict.items())[1][1]
unet_effb0.save('unet_effb0.h5')
linknet_effb0.save('linknet_effb0.h5')
code
121154806/cell_26
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os
import segmentation_models as sm
import tensorflow as tf
sm.set_framework('tf.keras')
sm.framework()
root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=bool)  # builtin bool (np.bool was removed from NumPy)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)

def visualize(**images):
    """Plot images in one row."""
    n = len(images)
    for i, (name, image) in enumerate(images.items()):
        plt.xticks([])
        plt.yticks([])

for img, msk in zip(X_train[:6], y_train[:6]):
    visualize(image=img, gt_mask=np.squeeze(msk))
BATCH_SIZE = 5
EPOCHS = 400
N_CLASS = 1
ACTIVATION = 'sigmoid'
CALLBACKS = [tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss')]

def Model_Training(model_list, batch_size, epochs, callbacks):
    model_dict = {}
    for key, dict_i in model_list.items():
        if dict_i['Train'] == True:
            model = dict_i['model']
            model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy', sm.losses.bce_jaccard_loss, sm.metrics.iou_score])
            model.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25, verbose=2, callbacks=callbacks)
            model_dict[key] = model
    return model_dict

model_list = {'unet-efficientnetb0': {'model': sm.Unet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True},
              'linknet-efficientnetb0': {'model': sm.Linknet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}}
model_dict = Model_Training(model_list, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=CALLBACKS)
unet_effb0 = list(model_dict.items())[0][1]
linknet_effb0 = list(model_dict.items())[1][1]
unet_effb0.save('unet_effb0.h5')
linknet_effb0.save('linknet_effb0.h5')
linknet_effb0.save_weights('model_weights.h5')
with open('model_architecture.json', 'w') as f:
    f.write(linknet_effb0.to_json())

def display_result(display_list, extra_title=''):
    title = ['Input Image', 'True Mask', 'Predicted Mask Unet', 'Predicted Mask Linknet']
    if len(display_list) > len(title):
        title.append(extra_title)
    for i in range(len(display_list)):
        plt.axis('off')

for i in range(len(X_test[:5])):
    display_result([X_test[i], y_test[i], unet_effb0.predict(np.expand_dims(X_test[i], axis=0))[0], linknet_effb0.predict(np.expand_dims(X_test[i], axis=0))[0]])
code
121154806/cell_2
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import segmentation_models as sm
sm.set_framework('tf.keras')
sm.framework()
code
121154806/cell_11
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os
root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=bool)  # builtin bool (np.bool was removed from NumPy)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
code