path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses (1 value)
34121580/cell_19
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
code
34121580/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import statsmodels.api as sm
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
x = df1['Active']
y = target['Deceased']
model = sm.OLS(y, x).fit()
predictions = model.predict(x)
model.summary()
code
34121580/cell_28
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df4 = pd.read_csv('../input/covid-19-india/hotty.csv')
df4.columns
code
34121580/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
print(labels)
code
34121580/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
explode = []
for i in labels:
    explode.append(0.05)
plt.figure(figsize=(15, 10))
plt.pie(decease, labels=labels, autopct='%1.1f%%', startangle=9, explode=explode)
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.tight_layout()
code
34121580/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
df1['Active'] = df1['Confirmed'] - (df1['Deceased'] + df1['Recovered'])
df1['Deceased Rate (per 100)'] = np.round(100 * df1['Deceased'] / df1['Confirmed'], 2)
df1['Recovered Rate (per 100)'] = np.round(100 * df1['Recovered'] / df1['Confirmed'], 2)
df1.sort_values('Confirmed', ascending=False).fillna(0).style.background_gradient(cmap='Blues', subset=['Confirmed']).background_gradient(cmap='Blues', subset=['Deceased']).background_gradient(cmap='Blues', subset=['Recovered']).background_gradient(cmap='Blues', subset=['Active']).background_gradient(cmap='Blues', subset=['Deceased Rate (per 100)']).background_gradient(cmap='Blues', subset=['Recovered Rate (per 100)'])
code
34121580/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
labels = list(df1.State)
decease = list(df1.Deceased)
explode = []
for i in labels:
    explode.append(0.05)
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.tight_layout()
df2 = pd.read_csv('../input/covid-19-india/hotspot.csv')
df2.Red
df2.State
df2.dropna
df3 = df2.drop(37)
plt.scatter(df3.State, df3.Red)
code
34121580/cell_12
[ "text_html_output_1.png" ]
from pandas.plotting import andrews_curves
import pandas as pd
df1 = pd.read_csv('../input/indiastate/data state.csv')
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
target = pd.DataFrame(df1.Deceased, columns=['Deceased'])
X = df1[['Confirmed', 'Active', 'Recovered', 'Deceased']]
y = target['Deceased']
from pandas.plotting import andrews_curves
andrews_curves(df1, 'State', ax=None)
code
128027656/cell_13
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3
code
128027656/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv')
df = pd.DataFrame(data)
df
df.isnull().sum()
DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)]
DF_NONE.reset_index(drop=True, inplace=True)
DF_NONE
code
128027656/cell_25
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1 df1.describe().T df1.nunique() df1 = df1.drop(['ID', 'ZIP Code', 'city', 'states'], axis=1) nans = df1[df1.isna().any(axis=1)] print(f'Total rows with NaNs: {nans.shape[0]}\n')
code
128027656/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv')
df = pd.DataFrame(data)
df
code
128027656/cell_23
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1 df1.describe().T df1.nunique() df1 = df1.drop(['ID', 'ZIP Code', 'city', 'states'], axis=1) df1[df1['Experience'] < 0]['Experience'].value_counts()
code
128027656/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1 df1.describe().T df1.nunique() df1 = df1.drop(['ID', 'ZIP Code', 'city', 'states'], axis=1) nans = df1[df1.isna().any(axis=1)] numerical = ['Age', 'Experience', 'Income', 'CCAvg', 'Mortgage', 'lat', 'long'] for column in numerical: plt.figure(figsize=(10, 5)) sns.distplot(df1, x=df1[column]) plt.title(column, backgroundcolor='black', color='white', fontsize=30) plt.xticks(rotation=90) plt.xlabel(column, fontsize=20) plt.grid() plt.show()
code
128027656/cell_2
[ "text_html_output_1.png" ]
!pip install basemap
code
128027656/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv')
df = pd.DataFrame(data)
df
df.isnull().sum()
DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)]
DF_NONE.reset_index(drop=True, inplace=True)
DF_NONE
DF_NONE1 = df.loc[df['ZIP Code'] == 92717]
DF_NONE1['county'].fillna(value='irvine', inplace=True)
DF_NONE1['lat'].fillna(value=33.6462, inplace=True)
DF_NONE1['long'].fillna(value=-117.839, inplace=True)
DF_NONE1
code
128027656/cell_19
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1 df1.describe().T df1.nunique()
code
128027656/cell_1
[ "text_plain_output_1.png" ]
!pip install zipcodes
code
128027656/cell_7
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv')
df = pd.DataFrame(data)
df
df.isnull().sum()
code
128027656/cell_15
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1
code
128027656/cell_3
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
import zipcodes
from mpl_toolkits.basemap import Basemap
from warnings import filterwarnings
filterwarnings('ignore')
code
128027656/cell_17
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4 DF = df.dropna() df1 = DF.append(DF_NONE1) df1 = df1.append(DF_NONE2) df1 = df1.append(DF_NONE3) df1 = df1.append(DF_NONE4) df1.reset_index(drop=True, inplace=True) df1 df1.describe().T
code
128027656/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv') df = pd.DataFrame(data) df df.isnull().sum() DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)] DF_NONE.reset_index(drop=True, inplace=True) DF_NONE DF_NONE1 = df.loc[df['ZIP Code'] == 92717] DF_NONE1['county'].fillna(value='irvine', inplace=True) DF_NONE1['lat'].fillna(value=33.6462, inplace=True) DF_NONE1['long'].fillna(value=-117.839, inplace=True) DF_NONE1 DF_NONE2 = df.loc[df['ZIP Code'] == 92634] DF_NONE2['county'].fillna(value='Fullerton', inplace=True) DF_NONE2['lat'].fillna(value=33.8739, inplace=True) DF_NONE2['long'].fillna(value=-117.9028, inplace=True) DF_NONE2 DF_NONE3 = df.loc[df['ZIP Code'] == 96651] DF_NONE3['county'].fillna(value='Rudno and Hronom', inplace=True) DF_NONE3['lat'].fillna(value=48.4242, inplace=True) DF_NONE3['long'].fillna(value=18.7071, inplace=True) DF_NONE3 DF_NONE4 = df.loc[df['ZIP Code'] == 9307] DF_NONE4['county'].fillna(value='Albani', inplace=True) DF_NONE4['lat'].fillna(value=40.68106, inplace=True) DF_NONE4['long'].fillna(value=19.63539, inplace=True) DF_NONE4
code
128027656/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/bank-personal-loan/Bank_Personal_Loan.csv')
df = pd.DataFrame(data)
df
df.isnull().sum()
DF_NONE = df.loc[(df['ZIP Code'] == 92634) | (df['ZIP Code'] == 92717) | (df['ZIP Code'] == 96651) | (df['ZIP Code'] == 9307)]
DF_NONE.reset_index(drop=True, inplace=True)
DF_NONE
DF_NONE1 = df.loc[df['ZIP Code'] == 92717]
DF_NONE1['county'].fillna(value='irvine', inplace=True)
DF_NONE1['lat'].fillna(value=33.6462, inplace=True)
DF_NONE1['long'].fillna(value=-117.839, inplace=True)
DF_NONE1
DF_NONE2 = df.loc[df['ZIP Code'] == 92634]
DF_NONE2['county'].fillna(value='Fullerton', inplace=True)
DF_NONE2['lat'].fillna(value=33.8739, inplace=True)
DF_NONE2['long'].fillna(value=-117.9028, inplace=True)
DF_NONE2
code
16118182/cell_21
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
bike_cov = bike.cov()
bike_cov

def outlier_detect(bike):
    for i in bike.describe().columns:
        Q1 = bike.describe().at['25%', i]
        Q3 = bike.describe().at['75%', i]
        IQR = Q3 - Q1
        LTV = Q1 - 1.5 * IQR
        UTV = Q3 + 1.5 * IQR
        x = np.array(bike[i])
        p = []
        for j in x:
            if j < LTV or j > UTV:
                p.append(j)
        print('\n Outliers for Column : ', i, ' Outliers count ', len(p))
        print(p)

bike.nunique()
bike.duplicated().sum()
bike = bike.drop_duplicates()
bike.duplicated().sum()
code
16118182/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
bike_cov = bike.cov()
bike_cov
code
16118182/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
code
16118182/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
code
16118182/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns bike = pd.read_csv('../input/bike_share.csv') bike.shape bike.columns bike.head(5).T bike.tail(5).T bike.isna().sum() bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]]) bike.describe().T bike_corr = bike.corr() bike_corr bike_cov = bike.cov() bike_cov import matplotlib.pyplot as plt plt.figure(figsize = (10,5)) ax = sns.boxplot(data = bike, orient = "h", color = "violet", palette = "Set1") plt.show() def outlier_detect(bike): for i in bike.describe().columns: Q1=bike.describe().at['25%',i] Q3=bike.describe().at['75%',i] IQR=Q3 - Q1 LTV=Q1 - 1.5 * IQR UTV=Q3 + 1.5 * IQR x=np.array(bike[i]) p=[] for j in x: if j < LTV or j>UTV: p.append(j) print('\n Outliers for Column : ', i, ' Outliers count ', len(p)) print(p) bike.nunique() bike.duplicated().sum() bike = bike.drop_duplicates() bike.duplicated().sum() plt.figure(figsize=(12, 6)) g = sns.distplot(bike['registered']) g.set_xlabel('registered', fontsize=12) g.set_ylabel('Frequency', fontsize=12) g.set_title('Frequency Distribuition- registered bikes', fontsize=20)
code
16118182/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.info()
code
16118182/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
code
16118182/cell_19
[ "text_html_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) bike = pd.read_csv('../input/bike_share.csv') bike.shape bike.columns bike.head(5).T bike.tail(5).T bike.isna().sum() bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]]) bike.describe().T bike_corr = bike.corr() bike_corr bike_cov = bike.cov() bike_cov def outlier_detect(bike): for i in bike.describe().columns: Q1=bike.describe().at['25%',i] Q3=bike.describe().at['75%',i] IQR=Q3 - Q1 LTV=Q1 - 1.5 * IQR UTV=Q3 + 1.5 * IQR x=np.array(bike[i]) p=[] for j in x: if j < LTV or j>UTV: p.append(j) print('\n Outliers for Column : ', i, ' Outliers count ', len(p)) print(p) bike.nunique() bike.duplicated().sum()
code
16118182/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16118182/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
code
16118182/cell_18
[ "text_html_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) bike = pd.read_csv('../input/bike_share.csv') bike.shape bike.columns bike.head(5).T bike.tail(5).T bike.isna().sum() bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]]) bike.describe().T bike_corr = bike.corr() bike_corr bike_cov = bike.cov() bike_cov def outlier_detect(bike): for i in bike.describe().columns: Q1=bike.describe().at['25%',i] Q3=bike.describe().at['75%',i] IQR=Q3 - Q1 LTV=Q1 - 1.5 * IQR UTV=Q3 + 1.5 * IQR x=np.array(bike[i]) p=[] for j in x: if j < LTV or j>UTV: p.append(j) print('\n Outliers for Column : ', i, ' Outliers count ', len(p)) print(p) bike.nunique()
code
16118182/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
code
16118182/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
bike_cov = bike.cov()
bike_cov
all_columns = list(bike)
numeric_columns = ['season', 'holiday', 'workingday', 'weather', 'temp', 'atemp', 'humidity', 'windspeed', 'casual', 'registered', 'count']
categorical_columns = [x for x in all_columns if x not in numeric_columns]
print('\nNumeric columns')
print(numeric_columns)
print('\nCategorical columns')
print(categorical_columns)
code
16118182/cell_17
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
bike_cov = bike.cov()
bike_cov
all_columns = list(bike)
numeric_columns = ['season', 'holiday', 'workingday', 'weather', 'temp', 'atemp', 'humidity', 'windspeed', 'casual', 'registered', 'count']
categorical_columns = [x for x in all_columns if x not in numeric_columns]

def outlier_detect(bike):
    for i in bike.describe().columns:
        Q1 = bike.describe().at['25%', i]
        Q3 = bike.describe().at['75%', i]
        IQR = Q3 - Q1
        LTV = Q1 - 1.5 * IQR
        UTV = Q3 + 1.5 * IQR
        x = np.array(bike[i])
        p = []
        for j in x:
            if j < LTV or j > UTV:
                p.append(j)
        print('\n Outliers for Column : ', i, ' Outliers count ', len(p))
        print(p)

x = bike[numeric_columns]
outlier_detect(x)
code
16118182/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
bike_cov = bike.cov()
bike_cov
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
ax = sns.boxplot(data=bike, orient='h', color='violet', palette='Set1')
plt.show()
code
16118182/cell_22
[ "image_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) bike = pd.read_csv('../input/bike_share.csv') bike.shape bike.columns bike.head(5).T bike.tail(5).T bike.isna().sum() bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]]) bike.describe().T bike_corr = bike.corr() bike_corr bike_cov = bike.cov() bike_cov def outlier_detect(bike): for i in bike.describe().columns: Q1=bike.describe().at['25%',i] Q3=bike.describe().at['75%',i] IQR=Q3 - Q1 LTV=Q1 - 1.5 * IQR UTV=Q3 + 1.5 * IQR x=np.array(bike[i]) p=[] for j in x: if j < LTV or j>UTV: p.append(j) print('\n Outliers for Column : ', i, ' Outliers count ', len(p)) print(p) bike.nunique() bike.duplicated().sum() bike = bike.drop_duplicates() bike.duplicated().sum() bike.head(2)
code
16118182/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
code
16118182/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
bike.head(5).T
bike.tail(5).T
bike.isna().sum()
bike.isnull().apply(lambda x: [sum(x), sum(x) * 100 / bike.shape[0]])
bike.describe().T
bike_corr = bike.corr()
bike_corr
code
16118182/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike = pd.read_csv('../input/bike_share.csv')
bike.shape
bike.columns
code
122244202/cell_25
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, zoom_range=0.01, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
model = Sequential([Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)), BatchNormalization(), Conv2D(32, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Dropout(0.25), Conv2D(64, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Conv2D(128, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Flatten(), Dense(256, activation='relu'), BatchNormalization(), Dropout(0.25), Dense(10, activation='softmax')])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = MyCallback(monitor='val_accuracy')
history = model.fit(datagen.flow(X_train, y_train, batch_size=100), steps_per_epoch=len(X_train) / 100, epochs=20, validation_data=(X_val, y_val), callbacks=[callbacks])
code
122244202/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
train_data.head()
code
122244202/cell_29
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, Conv2D, MaxPooling2D from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/digit-recognizer/train.csv') test_data = pd.read_csv('../input/digit-recognizer/test.csv') submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv') X = train_data.drop(columns='label') X = X / 255 X_test = test_data.values.reshape(-1, 28, 28, 1) X = X.values.reshape(-1, 28, 28, 1) X_test = X_test / 255 (X_train.shape, X_val.shape, y_train.shape, y_val.shape) datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, zoom_range=0.01, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train) model = Sequential([Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)), BatchNormalization(), Conv2D(32, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Dropout(0.25), Conv2D(64, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Conv2D(128, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Flatten(), Dense(256, activation='relu'), BatchNormalization(), Dropout(0.25), Dense(10, activation='softmax')]) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) callbacks = MyCallback(monitor='val_accuracy') history = model.fit(datagen.flow(X_train, y_train, batch_size=100), steps_per_epoch=len(X_train) / 100, epochs=20, validation_data=(X_val, y_val), callbacks=[callbacks]) predictions = model.predict(X_test)
code
122244202/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122244202/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
train_data.isna().any().describe()
code
122244202/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
test_data.isna().any().describe()
code
122244202/cell_16
[ "text_html_output_1.png" ]
(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
code
122244202/cell_22
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
model = Sequential([Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)), BatchNormalization(), Conv2D(32, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Dropout(0.25), Conv2D(64, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Conv2D(128, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Flatten(), Dense(256, activation='relu'), BatchNormalization(), Dropout(0.25), Dense(10, activation='softmax')])
model.summary()
code
122244202/cell_27
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, zoom_range=0.01, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
model = Sequential([Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)), BatchNormalization(), Conv2D(32, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Dropout(0.25), Conv2D(64, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Conv2D(128, (3, 3), padding='same', activation='relu'), BatchNormalization(), MaxPooling2D(2, 2), Flatten(), Dense(256, activation='relu'), BatchNormalization(), Dropout(0.25), Dense(10, activation='softmax')])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = MyCallback(monitor='val_accuracy')
history = model.fit(datagen.flow(X_train, y_train, batch_size=100), steps_per_epoch=len(X_train) / 100, epochs=20, validation_data=(X_val, y_val), callbacks=[callbacks])
fig, ax = plt.subplots(2, 1)
ax[0].plot(history.history['loss'], color='b', label='Training loss')
ax[0].plot(history.history['val_loss'], color='r', label='validation loss', axes=ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['accuracy'], color='b', label='Training accuracy')
ax[1].plot(history.history['val_accuracy'], color='r', label='Validation accuracy')
legend = ax[1].legend(loc='best', shadow=True)
code
18132352/cell_9
[ "text_plain_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt import pandas as pd import re import pandas as pd df = pd.read_csv('../input/bbc-text.csv') import re from nltk.stem.wordnet import WordNetLemmatizer stop_words = ['in', 'of', 'at', 'a', 'the'] def pre_process(text): text = str(text).lower() text = re.sub('((\\d+)[\\.])', '', text) text = re.sub('&lt;/?.*?&gt;', ' &lt;&gt; ', text) text = text.replace('dont', "don't") text = re.sub("[^a-zA-Z0-9.']+", ' ', text) "\n Don't include this in the beginning. \n First check if there are some patterns that may be lost if we remove stopwords.\n " text = [word for word in text.split(' ') if word not in stop_words] lmtzr = WordNetLemmatizer() text = ' '.join((lmtzr.lemmatize(i) for i in text)) return text for i in range(len(df)): df.text[i] = pre_process(df.text[i]) # Visualize the distribution of categories import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,6)) df.groupby('category').text.count().plot.bar(ylim=0) plt.show() from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score train_data = df.text[0:int(0.75 * len(df))] test_data = df.text[int(0.75 * len(df)) + 1:] train_target = df.category[0:int(0.75 * len(df))] test_target = df.category[int(0.75 * len(df)) + 1:] stop_words = ['in', 'of', 'at', 'a', 'the'] ngram_vectorizer = CountVectorizer(binary=True, ngram_range=(1, 3), stop_words=stop_words) ngram_vectorizer.fit(train_data) X_train = ngram_vectorizer.transform(train_data) X_test = ngram_vectorizer.transform(test_data) model = LogisticRegression() model.fit(X_train, train_target) test_acc = accuracy_score(test_target, model.predict(X_test)) print('Test accuracy: {0:.2f}%'.format(100 * test_acc))
code
18132352/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer
import pandas as pd
import re
import pandas as pd
df = pd.read_csv('../input/bbc-text.csv')
import re
from nltk.stem.wordnet import WordNetLemmatizer
stop_words = ['in', 'of', 'at', 'a', 'the']

def pre_process(text):
    text = str(text).lower()
    text = re.sub('((\\d+)[\\.])', '', text)
    text = re.sub('&lt;/?.*?&gt;', ' &lt;&gt; ', text)
    text = text.replace('dont', "don't")
    text = re.sub("[^a-zA-Z0-9.']+", ' ', text)
    "\n Don't include this in the beginning. \n First check if there are some patterns that may be lost if we remove stopwords.\n "
    text = [word for word in text.split(' ') if word not in stop_words]
    lmtzr = WordNetLemmatizer()
    text = ' '.join((lmtzr.lemmatize(i) for i in text))
    return text

for i in range(len(df)):
    df.text[i] = pre_process(df.text[i])
df.head(10)
code
18132352/cell_2
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/bbc-text.csv')
df.head(10)
code
18132352/cell_11
[ "text_html_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import pandas as pd df = pd.read_csv('../input/bbc-text.csv') import re from nltk.stem.wordnet import WordNetLemmatizer stop_words = ['in', 'of', 'at', 'a', 'the'] def pre_process(text): text = str(text).lower() text = re.sub('((\\d+)[\\.])', '', text) text = re.sub('&lt;/?.*?&gt;', ' &lt;&gt; ', text) text = text.replace('dont', "don't") text = re.sub("[^a-zA-Z0-9.']+", ' ', text) "\n Don't include this in the beginning. \n First check if there are some patterns that may be lost if we remove stopwords.\n " text = [word for word in text.split(' ') if word not in stop_words] lmtzr = WordNetLemmatizer() text = ' '.join((lmtzr.lemmatize(i) for i in text)) return text for i in range(len(df)): df.text[i] = pre_process(df.text[i]) # Visualize the distribution of categories import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,6)) df.groupby('category').text.count().plot.bar(ylim=0) plt.show() from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score train_data = df.text[0:int(0.75 * len(df))] test_data = df.text[int(0.75 * len(df)) + 1:] train_target = df.category[0:int(0.75 * len(df))] test_target = df.category[int(0.75 * len(df)) + 1:] stop_words = ['in', 'of', 'at', 'a', 'the'] ngram_vectorizer = CountVectorizer(binary=True, ngram_range=(1, 3), stop_words=stop_words) ngram_vectorizer.fit(train_data) X_train = ngram_vectorizer.transform(train_data) X_test = ngram_vectorizer.transform(test_data) model = LogisticRegression() model.fit(X_train, train_target) test_acc = accuracy_score(test_target, model.predict(X_test)) import seaborn as sns from sklearn.metrics import confusion_matrix conf_mat = confusion_matrix(df.category[int(0.75 * len(df)) + 1:], model.predict(X_test)) fig, ax = plt.subplots(figsize=(10, 8)) sns.heatmap(conf_mat, annot=True, fmt='d', xticklabels=df.category.unique(), yticklabels=df.category.unique()) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show()
code
18132352/cell_12
[ "text_html_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer from sklearn import metrics from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import pandas as pd df = pd.read_csv('../input/bbc-text.csv') import re from nltk.stem.wordnet import WordNetLemmatizer stop_words = ['in', 'of', 'at', 'a', 'the'] def pre_process(text): text = str(text).lower() text = re.sub('((\\d+)[\\.])', '', text) text = re.sub('&lt;/?.*?&gt;', ' &lt;&gt; ', text) text = text.replace('dont', "don't") text = re.sub("[^a-zA-Z0-9.']+", ' ', text) "\n Don't include this in the beginning. \n First check if there are some patterns that may be lost if we remove stopwords.\n " text = [word for word in text.split(' ') if word not in stop_words] lmtzr = WordNetLemmatizer() text = ' '.join((lmtzr.lemmatize(i) for i in text)) return text for i in range(len(df)): df.text[i] = pre_process(df.text[i]) # Visualize the distribution of categories import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,6)) df.groupby('category').text.count().plot.bar(ylim=0) plt.show() from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score train_data = df.text[0:int(0.75 * len(df))] test_data = df.text[int(0.75 * len(df)) + 1:] train_target = df.category[0:int(0.75 * len(df))] test_target = df.category[int(0.75 * len(df)) + 1:] stop_words = ['in', 'of', 'at', 'a', 'the'] ngram_vectorizer = CountVectorizer(binary=True, ngram_range=(1, 3), stop_words=stop_words) ngram_vectorizer.fit(train_data) X_train = ngram_vectorizer.transform(train_data) X_test = ngram_vectorizer.transform(test_data) model = LogisticRegression() model.fit(X_train, train_target) test_acc = accuracy_score(test_target, model.predict(X_test)) import seaborn as sns from sklearn.metrics import confusion_matrix conf_mat = confusion_matrix(df.category[int(0.75*len(df))+1:], model.predict(X_test)) fig, ax = plt.subplots(figsize=(10,8)) sns.heatmap(conf_mat, annot=True, fmt='d', xticklabels=df.category.unique(), yticklabels=df.category.unique()) plt.ylabel('Actual') plt.xlabel('Predicted') plt.show() from sklearn import metrics print(metrics.classification_report(test_target, model.predict(X_test), target_names=df.category.unique()))
code
18132352/cell_5
[ "image_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer import matplotlib.pyplot as plt import pandas as pd import re import pandas as pd df = pd.read_csv('../input/bbc-text.csv') import re from nltk.stem.wordnet import WordNetLemmatizer stop_words = ['in', 'of', 'at', 'a', 'the'] def pre_process(text): text = str(text).lower() text = re.sub('((\\d+)[\\.])', '', text) text = re.sub('&lt;/?.*?&gt;', ' &lt;&gt; ', text) text = text.replace('dont', "don't") text = re.sub("[^a-zA-Z0-9.']+", ' ', text) "\n Don't include this in the beginning. \n First check if there are some patterns that may be lost if we remove stopwords.\n " text = [word for word in text.split(' ') if word not in stop_words] lmtzr = WordNetLemmatizer() text = ' '.join((lmtzr.lemmatize(i) for i in text)) return text for i in range(len(df)): df.text[i] = pre_process(df.text[i]) import matplotlib.pyplot as plt fig = plt.figure(figsize=(10, 6)) df.groupby('category').text.count().plot.bar(ylim=0) plt.show()
code
73074059/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.describe()
code
73074059/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
code
73074059/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
print(f'Número de Linhas e Colunas: {df.shape}')
df.head()
code
73074059/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
sns.histplot(data=df.Sobreviveu, x=df.Idade, hue=df.Sobreviveu, bins=14)
code
73074059/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
df.groupby('Faixa Etária')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
import seaborn as sns
df.groupby('Renda')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Cabine.isnull().sum() / df.shape[0] * 100
code
73074059/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
df.groupby('Faixa Etária')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
import seaborn as sns
df.groupby('Renda')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
code
73074059/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
code
73074059/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
code
73074059/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
code
73074059/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
code
73074059/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
code
73074059/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
df.groupby('Faixa Etária')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
import seaborn as sns
sns.histplot(data=df.Sobreviveu, x=df['Preço da Passagem'], hue=df.Sobreviveu, bins=12)
code
73074059/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
code
73074059/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
df.groupby('Faixa Etária')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
code
73074059/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.Sobreviveu.value_counts(normalize=True) * 100
df.groupby('Sexo')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.groupby('Classe')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
df.Classe.value_counts(normalize=True).sort_index() * 100
df.Sexo.value_counts(normalize=True) * 100
df.isnull().sum()
df.Idade.isnull().sum() / df.shape[0] * 100
import seaborn as sns
df.groupby('Faixa Etária')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
import seaborn as sns
df.groupby('Renda')['Sobreviveu'].value_counts(normalize=True).sort_index() * 100
len(df['Preço da Passagem'].unique())
code
73074059/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.columns = ['Id', 'Sobreviveu', 'Classe', 'Nome', 'Sexo', 'Idade', 'Familiares', 'Dependentes', 'Ticket', 'Preço da Passagem', 'Cabine', 'Local de Embarque']
df.head()
code
32068693/cell_9
[ "text_html_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) pred = final_reg_case.predict(X_test_cases) print('The RMSE value', mean_squared_error(y_test_cases, pred) ** 0.5)
code
32068693/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.metrics import mean_squared_error from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2700) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_case, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_cases, y_train_cases) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3800) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_fatal, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_fatal, y_train_fatal) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal) data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) if Iday6 == 0: iavg = 1 else: iavg = Iday7 / Iday6 if Fday6 == 0: favg = 1 else: favg = Fday7 / Fday6 target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'iratio': iavg, 'fratio': favg, 'target_infection': target_infection, 'target_fatal': target_fatal}) featured = pd.DataFrame(data) X_y_f = shuffle(featured) y_cases_f = X_y_f['target_infection'] y_fatal_f = X_y_f['target_fatal'] X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33) X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33) best_alpha = 10000 best_itr = 4500 final_reg_case_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case_f.fit(X_train_cases_f, y_train_cases_f) pred_f = final_reg_case_f.predict(X_test_cases_f) best_alpha = 100 best_itr = 2700 final_reg_fatal_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_fatal_f.fit(X_train_fatal_f, y_train_fatal_f) pred_f = final_reg_fatal_f.predict(X_test_fatal_f) print('RMSE is:', mean_squared_error(y_test_fatal_f, pred_f) ** 0.5)
code
32068693/cell_6
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) print('Shape of infection train dataset:', (X_train_cases.shape, y_train_cases.shape)) print('Shape of infection test dataset:', (X_test_cases.shape, y_test_cases.shape)) print('Shape of fatal train dataset:', (X_train_fatal.shape, y_train_fatal.shape)) print('Shape of fatal test dataset:', (X_test_fatal.shape, y_test_fatal.shape))
code
32068693/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') print(df.shape, '\n', df.head())
code
32068693/cell_11
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal)
code
32068693/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2700) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_case, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_cases, y_train_cases) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3800) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_fatal, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_fatal, y_train_fatal) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal) data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) if Iday6 == 0: iavg = 1 else: iavg = Iday7 / Iday6 if Fday6 == 0: favg = 1 else: favg = Fday7 / Fday6 target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'iratio': iavg, 'fratio': favg, 'target_infection': target_infection, 'target_fatal': target_fatal}) featured = pd.DataFrame(data) X_y_f = shuffle(featured) y_cases_f = X_y_f['target_infection'] y_fatal_f = X_y_f['target_fatal'] X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33) X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33) best_alpha = 10000 best_itr = 4500 final_reg_case_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case_f.fit(X_train_cases_f, y_train_cases_f) best_alpha = 100 best_itr = 2700 final_reg_fatal_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_fatal_f.fit(X_train_fatal_f, y_train_fatal_f)
code
32068693/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt import math import random from sklearn.metrics import mean_squared_error from sklearn import metrics from sklearn.linear_model import ElasticNet from sklearn.model_selection import RandomizedSearchCV import pickle from sklearn.model_selection import train_test_split from tqdm import tqdm from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
32068693/cell_8
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases)
code
32068693/cell_16
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2700) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_case, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_cases, y_train_cases) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3800) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_fatal, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_fatal, y_train_fatal) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal) data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) if Iday6 == 0: iavg = 1 else: iavg = Iday7 / Iday6 if Fday6 == 0: favg = 1 else: favg = Fday7 / Fday6 target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'iratio': iavg, 'fratio': favg, 'target_infection': target_infection, 'target_fatal': target_fatal}) featured = pd.DataFrame(data) X_y_f = shuffle(featured) y_cases_f = X_y_f['target_infection'] y_fatal_f = X_y_f['target_fatal'] X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33) X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33) best_alpha = 10000 best_itr = 4500 final_reg_case_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case_f.fit(X_train_cases_f, y_train_cases_f)
code
32068693/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] print(df.shape, '\n', df.head())
code
32068693/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.metrics import mean_squared_error from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2700) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_case, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_cases, y_train_cases) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3800) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_fatal, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_fatal, y_train_fatal) results = pd.DataFrame.from_dict(clf.cv_results_) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal) data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) if Iday6 == 0: iavg = 1 else: iavg = Iday7 / Iday6 if Fday6 == 0: favg = 1 else: favg = Fday7 / Fday6 target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'iratio': iavg, 'fratio': favg, 'target_infection': target_infection, 'target_fatal': target_fatal}) featured = pd.DataFrame(data) X_y_f = shuffle(featured) y_cases_f = X_y_f['target_infection'] y_fatal_f = X_y_f['target_fatal'] X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33) X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33) best_alpha = 10000 best_itr = 4500 final_reg_case_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case_f.fit(X_train_cases_f, y_train_cases_f) pred_f = final_reg_case_f.predict(X_test_cases_f) print('RMSE is:', mean_squared_error(y_test_cases_f, pred_f) ** 0.5)
code
32068693/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') test.head()
code
32068693/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2700) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_case, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_cases, y_train_cases) results = pd.DataFrame.from_dict(clf.cv_results_) reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3800) params = [{'alpha': [10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 2, 10 ** 3, 10 ** 4]}] clf = RandomizedSearchCV(reg_fatal, params, cv=5, scoring='neg_root_mean_squared_error', return_train_score=True) search = clf.fit(X_train_fatal, y_train_fatal) results = pd.DataFrame.from_dict(clf.cv_results_) data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) if Iday6 == 0: iavg = 1 else: iavg = Iday7 / Iday6 if Fday6 == 0: favg = 1 else: favg = Fday7 / Fday6 target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'iratio': iavg, 'fratio': favg, 'target_infection': target_infection, 'target_fatal': target_fatal}) featured = pd.DataFrame(data) X_y_f = shuffle(featured) y_cases_f = X_y_f['target_infection'] y_fatal_f = X_y_f['target_fatal'] X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33) X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33) print('Shape of featurized infection train dataset:', (X_train_cases_f.shape, y_train_cases_f.shape)) print('Shape of featurized infection test dataset:', (X_test_cases_f.shape, y_test_cases_f.shape)) print('Shape of featurized fatal train dataset:', (X_train_fatal_f.shape, y_train_fatal_f.shape)) print('Shape of featurized fatal test dataset:', (X_test_fatal_f.shape, y_test_fatal_f.shape))
code
32068693/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) X_y = shuffle(new_data) y_cases = X_y['target_infection'] y_fatal = X_y['target_fatal'] X = X_y.drop(['target_infection', 'target_fatal'], axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) best_alpha = 1000 best_itr = 2700 final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr) final_reg_case.fit(X_train_cases, y_train_cases) pred = final_reg_case.predict(X_test_cases) best_alpha = 100 best_iter = 3800 final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter) final_reg_fatal.fit(X_train_fatal, y_train_fatal) pred = final_reg_fatal.predict(X_test_fatal) print('The RMSE value', mean_squared_error(y_test_fatal, pred) ** 0.5)
code
32068693/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df['Province_State'].fillna('state', inplace=True) df['Country_Region'] = [country_name.replace("'", '') for country_name in df['Country_Region']] data = [] countries = df.Country_Region.unique() for country in countries: provinces = df[df.Country_Region == country].Province_State.unique() for province in provinces: temp_df = df[(df['Country_Region'] == country) & (df['Province_State'] == province)] for i in range(0, 77): Iday1 = float(temp_df.iloc[i].ConfirmedCases) Iday2 = float(temp_df.iloc[i + 1].ConfirmedCases) Iday3 = float(temp_df.iloc[i + 2].ConfirmedCases) Iday4 = float(temp_df.iloc[i + 3].ConfirmedCases) Iday5 = float(temp_df.iloc[i + 4].ConfirmedCases) Iday6 = float(temp_df.iloc[i + 5].ConfirmedCases) Iday7 = float(temp_df.iloc[i + 6].ConfirmedCases) Fday1 = float(temp_df.iloc[i].Fatalities) Fday2 = float(temp_df.iloc[i + 1].Fatalities) Fday3 = float(temp_df.iloc[i + 2].Fatalities) Fday4 = float(temp_df.iloc[i + 3].Fatalities) Fday5 = float(temp_df.iloc[i + 4].Fatalities) Fday6 = float(temp_df.iloc[i + 5].Fatalities) Fday7 = float(temp_df.iloc[i + 6].Fatalities) target_infection = float(temp_df.iloc[i + 7].ConfirmedCases) target_fatal = float(temp_df.iloc[i + 7].Fatalities) data.append({'Iday1': Iday1, 'Iday2': Iday2, 'Iday3': Iday3, 'Iday4': Iday4, 'Iday5': Iday5, 'Iday6': Iday6, 'Iday7': Iday7, 'Fday1': Fday1, 'Fday2': Fday2, 'Fday3': Fday3, 'Fday4': Fday4, 'Fday5': Fday5, 'Fday6': Fday6, 'Fday7': Fday7, 'target_infection': target_infection, 'target_fatal': target_fatal}) new_data = pd.DataFrame(data) print('The shape of new dataFrame:', new_data.shape, '\nThe columns are:', new_data.columns) print(new_data.head(-5))
code
128018474/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/btcusd/data1.csv') df['Datetime'] = [i for i in range(len(df['Datetime']))] new_df = df[['Open', 'Volume']] x = np.array(new_df['Open']).reshape(-1, 1) y = np.array(new_df['Volume']).reshape(-1, 1) X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25) regr = linear_model.LinearRegression() regr.fit(X_train, y_train) print(f'Regr-Test: {regr.score(X_test, y_test)}') y_pred = regr.predict(X_test) plt.scatter(X_test, y_test, color='b') plt.plot(X_test, y_pred, color='r') plt.show()
code
128018474/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/btcusd/data1.csv') print(df.head(10)) df['Datetime'] = [i for i in range(len(df['Datetime']))] new_df = df[['Open', 'Volume']] sns.lmplot(data=new_df, x='Open', y='Volume', order=2, ci=None) plt.show()
code
128022704/cell_6
[ "text_plain_output_1.png" ]
from array import array ELEMENTS_LIMIT = 2 ** 8 - 1 array_list = array('B') for i in range(ELEMENTS_LIMIT): array_list.append(i)
code
128022704/cell_11
[ "text_plain_output_1.png" ]
data = array_list[:] for i in range(ELEMENTS_LIMIT - 1): _ = data.pop()
code
128022704/cell_1
[ "text_plain_output_1.png" ]
!python --version
code
128022704/cell_7
[ "text_plain_output_1.png" ]
from collections import deque ELEMENTS_LIMIT = 2 ** 8 - 1 deque_list = deque() for i in range(ELEMENTS_LIMIT): deque_list.append(i)
code
128022704/cell_8
[ "text_plain_output_1.png" ]
from array import array from collections import deque from sys import getsizeof ELEMENTS_LIMIT = 2 ** 8 - 1 def fill_and_print_details(x): for i in range(ELEMENTS_LIMIT): x.append(i) usual_list = [] array_list = array('B') deque_list = deque() fill_and_print_details(usual_list) fill_and_print_details(array_list) fill_and_print_details(deque_list)
code
128022704/cell_15
[ "text_plain_output_1.png" ]
data = array_list[:] for i in range(ELEMENTS_LIMIT - 1): _ = data.pop(0)
code
128022704/cell_16
[ "text_plain_output_1.png" ]
data = deque_list.copy() for i in range(ELEMENTS_LIMIT - 1): _ = data.popleft()
code
128022704/cell_14
[ "text_plain_output_1.png" ]
data = usual_list.copy() for i in range(ELEMENTS_LIMIT - 1): _ = data.pop(0)
code
128022704/cell_10
[ "text_plain_output_1.png" ]
data = usual_list.copy() for i in range(ELEMENTS_LIMIT - 1): _ = data.pop()
code