path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 distinct value: "code") |
---|---|---|---|
50216735/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test.head() | code |
50216735/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.naive_bayes import MultinomialNB
MNB = MultinomialNB()
MNB.fit(X_train, Y_train) | code |
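The cell above fits `MultinomialNB` on `X_train` and `Y_train`, which are defined in notebook cells not captured in this row. A minimal sketch of how those inputs are typically produced for this competition, assuming bag-of-words features from a `CountVectorizer` over the `Phrase` column (the vectorizer choice and variable names are assumptions, not taken from the notebook):

```python
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Load the competition data (same path as the cells above).
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')

# Assumption: bag-of-words features over the review phrases,
# with the 0-4 Sentiment label as the target.
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(train['Phrase'])
Y_train = train['Sentiment']

MNB = MultinomialNB()
MNB.fit(X_train, Y_train)
```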
50216735/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test.head() | code |
88097739/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/winter-olympic-medals-1924-2018/Winter_Olympic_Medals.csv')
data.dtypes
data_type = pd.DataFrame(data.dtypes).T.rename({0: 'Column Data Type:'})
data_type
data.describe() | code |
88097739/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/winter-olympic-medals-1924-2018/Winter_Olympic_Medals.csv')
data.dtypes
data_type = pd.DataFrame(data.dtypes).T.rename({0: 'Column Data Type:'})
data_type | code |
88097739/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88097739/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/winter-olympic-medals-1924-2018/Winter_Olympic_Medals.csv')
data.head() | code |
88097739/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/winter-olympic-medals-1924-2018/Winter_Olympic_Medals.csv')
data.dtypes | code |
88097739/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/winter-olympic-medals-1924-2018/Winter_Olympic_Medals.csv')
data.dtypes
data_type = pd.DataFrame(data.dtypes).T.rename({0: 'Column Data Type:'})
data_type
data.info() | code |
17144256/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_
df = df.drop('Embarked', axis=1)
df_test = df_test.drop('Embarked', axis=1)
df[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X.toarray())
df_test[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X_test.toarray())
features = ['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S', 'nan']
X = df[features]
X_test = df_test[features]
y = df.Survived
rf = RandomForestClassifier()
rf.fit(X, y)
y_pred = rf.predict(X_test)
y_pred[:5] | code |
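The cell above hand-rolls imputation and one-hot encoding separately for the train and test frames. A hedged sketch of the same steps expressed as a scikit-learn `ColumnTransformer`/`Pipeline`, which keeps the fit/transform split consistent automatically; this is an equivalent setup under those assumptions, not the notebook's actual code:

```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')

numeric = ['Age', 'SibSp', 'Parch', 'Fare']
categorical = ['Sex', 'Embarked']

# Mean-impute numerics; mode-impute then one-hot encode categoricals.
preprocess = ColumnTransformer([
    ('num', SimpleImputer(strategy='mean'), numeric),
    ('cat', Pipeline([
        ('impute', SimpleImputer(strategy='most_frequent')),
        ('ohe', OneHotEncoder(handle_unknown='ignore')),
    ]), categorical),
])

model = Pipeline([('prep', preprocess), ('rf', RandomForestClassifier())])
model.fit(df[numeric + categorical], df['Survived'])
y_pred = model.predict(df_test[numeric + categorical])
```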
17144256/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_
df = df.drop('Embarked', axis=1)
df_test = df_test.drop('Embarked', axis=1)
df.head() | code |
17144256/cell_4 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_ | code |
17144256/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.head() | code |
17144256/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
print(os.listdir('../input')) | code |
17144256/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_
df = df.drop('Embarked', axis=1)
df_test = df_test.drop('Embarked', axis=1)
df[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X.toarray())
df_test[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X_test.toarray())
df_test.head() | code |
17144256/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique() | code |
17144256/cell_10 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_
df = df.drop('Embarked', axis=1)
df_test = df_test.drop('Embarked', axis=1)
df[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X.toarray())
df_test[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X_test.toarray())
submi_df = df_test['PassengerId'].to_frame()
submi_df.head() | code |
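cell_10 builds `submi_df` but no export step appears in the captured rows. A plausible completion, assuming `y_pred` from the RandomForest in cell_13 and the standard Kaggle submission layout (both are assumptions):

```python
# Assumption: y_pred is the array produced by rf.predict(X_test) in cell_13.
submi_df['Survived'] = y_pred
submi_df.to_csv('submission.csv', index=False)  # PassengerId, Survived
```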
17144256/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df['Sex'] = df.Sex.apply(lambda x: 1 if x == 'male' else 0)
df_test['Sex'] = df_test.Sex.apply(lambda x: 1 if x == 'male' else 0)
df['Embarked'] = df.Embarked.apply(lambda x: str(x))
df_test['Embarked'] = df_test.Embarked.apply(lambda x: str(x))
df.Age.fillna(29.699118, inplace=True)
df_test.Age.fillna(29.699118, inplace=True)
df_test.Fare.fillna(35, inplace=True)
df.Embarked.unique()
ohe = OneHotEncoder()
X = ohe.fit_transform(df.Embarked.values.reshape(-1, 1))
X_test = ohe.transform(df_test.Embarked.values.reshape(-1, 1))
ohe.categories_
df = df.drop('Embarked', axis=1)
df_test = df_test.drop('Embarked', axis=1)
df[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X.toarray())
df_test[['C', 'Q', 'S', 'nan']] = pd.DataFrame(X_test.toarray())
features = ['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S', 'nan']
X = df[features]
X_test = df_test[features]
y = df.Survived
rf = RandomForestClassifier()
rf.fit(X, y) | code |
105205396/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
display_negative_values(internet_adoption, end_year=_2020) | code |
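`display_negative_values` and `display_positive_values` in the cell above are mirror images of each other. A sketch of a single parameterized helper that would cover both; the name `display_changes` and the explicit `threshold` argument are assumptions, not the notebook's code:

```python
import operator

def display_changes(table_name, df_analysis, positive=True, end_year='2021',
                    format_dict=None, caption_additional='', threshold=5):
    """Show changes beyond +/-threshold, mirroring the two helpers above."""
    table = df_analysis[table_name]
    suffix = end_year[-2:]  # '20' or '21'
    col = 'change_from_13to{}_pct'.format(suffix)
    if col not in table:  # fall back to the raw-change column
        col = 'change_from_13to{}'.format(suffix)
    cmp = operator.gt if positive else operator.lt
    out = table[cmp(table[col], threshold if positive else -threshold)]
    out = out.sort_values(col, ascending=not positive)
    caption = '{}: {} Changes from 2013 to {}. Size {}. <br> {}'.format(
        table_name, 'Positive' if positive else 'Negative',
        end_year, out.shape, caption_additional)
    styled = out.style.set_caption(caption)
    return styled.format(format_dict) if format_dict else styled
```

Under these assumptions, `display_negative_values(internet_adoption, end_year=_2020)` becomes `display_changes(internet_adoption, df_analysis, positive=False, end_year=_2020)`.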
105205396/cell_23 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
df_rank = {}
for key_name in key_names:
if key_name != average_monthly_internet_costs:
df_rank[key_name] = df_list[key_name].rank(ascending=False).astype(int)
df_rank[average_monthly_internet_costs] = df_list[average_monthly_internet_costs].rank(ascending=True).astype(int)
df_rank_top_bottom = {}
for key_name in key_names:
df = df_rank[key_name]
df_new = pd.DataFrame(columns=df.columns)
index = []
for column in df.columns:
df_temp = df[[column]].sort_values(column)
sliced_df = pd.concat([df_temp.head(5), df_temp.tail(5)])
df_new[column] = sliced_df.index
index = sliced_df[column]
df_new = df_new.set_index(index)
df_new.index.name = 'rank'
df_rank_top_bottom[key_name] = df_new
caption_string = '{table}: Top 5 and Bottom 5 Rank of each year'
table_name = internet_adoption
df_rank_top_bottom[table_name].style.set_caption(caption_string.format(table=table_name))
table_name = average_monthly_internet_costs
df_rank_top_bottom[table_name].style.set_caption(caption_string.format(table=table_name)) | code |
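For reference, the top-and-bottom slicing that the loop above performs (once the `pd.concat` call is restored to concatenate the head and tail) reduces to this standalone pattern; all names here are illustrative only:

```python
import pandas as pd

# Twenty ranked countries, best rank first after sorting.
ranks = pd.Series(range(1, 21),
                  index=['country_{:02d}'.format(i) for i in range(1, 21)],
                  name='2021')
ranked = ranks.sort_values()
top_bottom = pd.concat([ranked.head(5), ranked.tail(5)])  # 5 best and 5 worst ranks
print(top_bottom)
```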
105205396/cell_6 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float) | code |
105205396/cell_7 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
print('Original data')
for key, df in df_list_orig.items():
print('{}: NaN {}, Shape {}'.format(key, df.isna().sum().sum(), df.shape))
print('-----------------------')
print('Data after NaN removal')
for key, df in df_list.items():
print('{}: NaN {}, Shape {}'.format(key, df.isna().sum().sum(), df.shape))
print('-----------------------')
for key, df in df_list.items():
print('{}: Columns {}'.format(key, df.columns)) | code |
105205396/cell_15 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
display_negative_values(average_after_tax_wages) | code |
105205396/cell_16 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
display_negative_values(GDP_per_capita) | code |
105205396/cell_14 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
display_positive_values(average_monthly_internet_costs) | code |
105205396/cell_22 | [
"text_html_output_1.png"
] | import os
import pandas as pd
pathvals = ['/kaggle/input/internet-prices-datasets-for-analysis', './data']
fnames = ['average_after_tax_wages.csv', 'average_monthly_internet.csv', 'GDP_per_capita.csv', 'internet_adoption.csv']
average_monthly_internet = 'average_monthly_internet'
average_monthly_internet_costs = 'average_monthly_internet_costs'
average_after_tax_wages = 'average_after_tax_wages'
GDP_per_capita = 'GDP_per_capita'
internet_adoption = 'internet_adoption'
key_names = [average_monthly_internet_costs, average_after_tax_wages, GDP_per_capita, internet_adoption]
df_list = {}
df_list_orig = {}
path = ''
for pathval in pathvals:
if os.path.exists(pathval):
path = pathval
break
for fname in fnames:
df = pd.read_csv(os.path.join(path, fname))
key = fname[:-4]
if key == average_monthly_internet:
key = average_monthly_internet_costs
df_list_orig[key] = df
df_list[key] = df.dropna(axis=1, how='all')
df_list[key].set_index('Country', inplace=True)
df = df_list[GDP_per_capita]
columns = df.columns
for column in columns:
df[column] = df[column].str.replace(',', '')
df[column] = df[column].astype(float)
change_from_13to20 = 'change_from_13to20'
change_from_13to21 = 'change_from_13to21'
change_from_13to20_pct = 'change_from_13to20_pct'
change_from_13to21_pct = 'change_from_13to21_pct'
_2013 = '2013'
_2020 = '2020'
_2021 = '2021'
df_analysis = {}
for key_name in key_names:
df = df_list[key_name]
df_new = pd.DataFrame(index=df.index, columns=[change_from_13to20, change_from_13to21, change_from_13to20_pct, change_from_13to21_pct])
df_new[change_from_13to20] = df[_2020] - df[_2013]
df_new[change_from_13to20_pct] = (df[_2020] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to20_pct] = df_new[change_from_13to20_pct].round(decimals=2)
if _2021 in df.columns:
df_new[change_from_13to21] = df[_2021] - df[_2013]
df_new[change_from_13to21_pct] = (df[_2021] - df[_2013]) / df[_2013] * 100
df_new[change_from_13to21_pct] = df_new[change_from_13to21_pct].round(decimals=2)
df_analysis[key_name] = df_new
caption_string_neg = '{table}: Negative Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
caption_string_pos = '{table}: Positive Changes from {start_year} to {end_year}. Size {shape}. <br> {additional}'
format_dict_nopct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}'}
format_dict_pct = {change_from_13to20: '{:.2f}', change_from_13to21: '{:.2f}', change_from_13to20_pct: '{:.2f}%', change_from_13to21_pct: '{:.2f}%'}
def display_negative_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] < -5].sort_values(change_from_13to20_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to20] < -5].sort_values(change_from_13to20, ascending=True)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] < -5].sort_values(change_from_13to21_pct, ascending=True)
else:
df = df_temp[df_temp[change_from_13to21] < -5].sort_values(change_from_13to21, ascending=True)
if format_dict is None:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_neg.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
def display_positive_values(table_name, df=df_analysis, end_year=_2021, format_dict=format_dict_pct, caption_additional=''):
df_temp = df[table_name]
if end_year == _2020:
if change_from_13to20_pct in df_temp:
df = df_temp[df_temp[change_from_13to20_pct] > 5].sort_values(change_from_13to20_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to20] > 5].sort_values(change_from_13to20, ascending=False)
if end_year == _2021:
if change_from_13to21_pct in df_temp:
df = df_temp[df_temp[change_from_13to21_pct] > 5].sort_values(change_from_13to21_pct, ascending=False)
else:
df = df_temp[df_temp[change_from_13to21] > 5].sort_values(change_from_13to21, ascending=False)
if format_dict is None:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional))
else:
return df.style.set_caption(caption_string_pos.format(table=table_name, start_year=_2013, end_year=end_year, shape=df.shape, additional=caption_additional)).format(format_dict)
df_rank = {}
for key_name in key_names:
if key_name != average_monthly_internet_costs:
df_rank[key_name] = df_list[key_name].rank(ascending=False).astype(int)
df_rank[average_monthly_internet_costs] = df_list[average_monthly_internet_costs].rank(ascending=True).astype(int)
df_rank_top_bottom = {}
for key_name in key_names:
df = df_rank[key_name]
df_new = pd.DataFrame(columns=df.columns)
index = []
for column in df.columns:
df_temp = df[[column]].sort_values(column)
sliced_df = pd.concat([df_temp.head(5), df_temp.tail(5)])
df_new[column] = sliced_df.index
index = sliced_df[column]
df_new = df_new.set_index(index)
df_new.index.name = 'rank'
df_rank_top_bottom[key_name] = df_new
caption_string = '{table}: Top 5 and Bottom 5 Rank of each year'
table_name = internet_adoption
df_rank_top_bottom[table_name].style.set_caption(caption_string.format(table=table_name)) | code |
32071401/cell_13 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
#Exploratory Data Analysis
#Petal Length VS Petal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='x',color='#270c8c',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='o',color='#d5081e',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='>',color='#45aa53',label='Virginica',ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
sns.violinplot(x='Species', y='SepalLengthCm', palette='muted', inner='quartile', data=Iris)
plt.subplot(2, 2, 2)
sns.violinplot(x='Species', y='SepalWidthCm', palette='muted', inner='quartile', data=Iris)
plt.subplot(2, 2, 3)
sns.violinplot(x='Species', y='PetalLengthCm', palette='muted', inner='quartile', data=Iris)
plt.subplot(2, 2, 4)
sns.violinplot(x='Species', y='PetalWidthCm', palette='muted', inner='quartile', data=Iris)
plt.show() | code |
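The four near-identical `subplot`/`violinplot` calls at the end of the cell above can be generated in a loop over the feature columns; a small equivalent sketch (same data and styling, only the repetition removed):

```python
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

Iris = pd.read_csv('../input/iris/Iris.csv')
features = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']

plt.figure(figsize=(15, 10))
for i, feature in enumerate(features, start=1):
    plt.subplot(2, 2, i)  # 2x2 grid, one violin plot per feature
    sns.violinplot(x='Species', y=feature, palette='muted', inner='quartile', data=Iris)
plt.show()
```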
32071401/cell_4 | [
"image_output_1.png"
] | import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
Iris.head(n=10) | code |
32071401/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
#Exploratory Data Analysis
#Petal Length VS Petal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='x',color='#270c8c',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='o',color='#d5081e',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='>',color='#45aa53',label='Virginica',ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
fig = plt.gcf()
fig.set_size_inches(15, 9)
Iris.plot.area(y=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'], alpha=0.5, figsize=(13, 9)) | code |
32071401/cell_2 | [
"image_output_1.png"
] | import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.head(n=10) | code |
32071401/cell_1 | [
"text_plain_output_1.png"
] | import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32071401/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
fig = Iris[Iris.Species == 'Iris-setosa'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', marker='x', color='#fa6c33', label='Setosa')
fig = Iris[Iris.Species == 'Iris-versicolor'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', marker='*', color='#3c8991', label='Versicolor', ax=fig)
fig = Iris[Iris.Species == 'Iris-virginica'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', marker='D', color='#d5081e', label='Virginica', ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
sns.set_style('darkgrid')
plt.show() | code |
32071401/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
#Exploratory Data Analysis
#Petal Length VS Petal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='x',color='#270c8c',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='o',color='#d5081e',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='>',color='#45aa53',label='Virginica',ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
fig = plt.gcf()
fig.set_size_inches(15, 9)
pairplot = sns.pairplot(Iris, hue='Species', palette='husl', diag_kind='kde', kind='scatter') | code |
32071401/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
#Exploratory Data Analysis
#Petal Length VS Petal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='x',color='#270c8c',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='o',color='#d5081e',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='>',color='#45aa53',label='Virginica',ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
Iris.hist(edgecolor='black', bins=15, color='#C11321', linewidth=1.8)
fig = plt.gcf()
fig.set_size_inches(15, 9)
plt.show() | code |
32071401/cell_3 | [
"image_output_1.png"
] | import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.info()
Iris.isnull().sum() | code |
32071401/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
#Exploratory Data Analysis
#Petal Length VS Petal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='x',color='#270c8c',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='o',color='#d5081e',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='PetalLengthCm',y='PetalWidthCm',marker='>',color='#45aa53',label='Virginica',ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
fig = plt.gcf()
fig.set_size_inches(15, 9)
# Pair Plot using seaborn library
pairplot=sns.pairplot(Iris,hue='Species',palette='husl',diag_kind="kde",kind='scatter')
Iris.plot.area(y=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'], alpha=0.5, figsize=(13, 9))
fig = plt.gcf()
fig.set_size_inches(13, 9)
fig = sns.heatmap(Iris.corr(numeric_only=True), annot=True, cmap='YlGnBu', linewidths=1.5, linecolor='k', vmin=0.5, vmax=1.0, square=True, cbar_kws={'orientation': 'vertical'}, cbar=True) | code |
32071401/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data Processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
Iris = pd.read_csv('../input/iris/Iris.csv')
Iris.isnull().sum()
Iris.drop('Id', axis=1, inplace=True)
#Exploratory Data Analysis
#Sepal Length VS Sepal Width
fig=Iris[Iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='x',color='#fa6c33',label='Setosa')
fig=Iris[Iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='*',color='#3c8991',label='Versicolor',ax=fig)
fig=Iris[Iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',marker='D',color='#d5081e',label='Virginica',ax=fig)
fig.set_xlabel('Sepal Length')
fig.set_ylabel('Sepal Width')
fig.set_title('Sepal Length VS Sepal Width')
fig=plt.gcf()
fig.set_size_inches(10,6)
sns.set_style("darkgrid")
plt.show()
fig = Iris[Iris.Species == 'Iris-setosa'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', marker='x', color='#270c8c', label='Setosa')
fig = Iris[Iris.Species == 'Iris-versicolor'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', marker='o', color='#d5081e', label='Versicolor', ax=fig)
fig = Iris[Iris.Species == 'Iris-virginica'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', marker='>', color='#45aa53', label='Virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Petal Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
sns.set_style('darkgrid')
plt.show() | code |
104124854/cell_4 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) | code |
104124854/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
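# trans() imputes numeric columns with their means and categorical columns with
# their modes, then splits Cabin ("Deck/Num/Side") into three separate features.
# Note that because imputation happens inside the function, each frame is filled
# with its own statistics (test-set means for the test set), a simplification
# rather than a leakage-safe pipeline.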
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
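# The encoder and scaler are fit on the training split only; the cells below call
# ohe.transform / mmc.transform on the validation and test frames, reusing the
# categories and min/max learned here.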
dfz = trans(X_test)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
enc_df = pd.DataFrame(ohe.transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
dfz[num] = mmc.transform(dfz[num])
df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
df = trans(df)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
enc_df = pd.DataFrame(ohe.transform(df[cat]).toarray(), columns=ohe.get_feature_names_out())
df = df.reset_index(drop=True).join(enc_df)
df.drop(['Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
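# Unlike the training/validation frames, PassengerId is kept here -- presumably so
# it can index the submission file later (the submission cell is not shown).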
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
df[num] = mmc.transform(df[num])
df.head() | code |
104124854/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
dfz.head() | code |
104124854/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104124854/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
dfz = trans(X_test)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
enc_df = pd.DataFrame(ohe.transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
dfz[num] = mmc.transform(dfz[num])
df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
df = trans(df)
df.head() | code |
104124854/cell_16 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
X_train = dfz
y_train = y_train.reset_index(drop=True)
dfz = trans(X_test)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
enc_df = pd.DataFrame(ohe.transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
dfz[num] = mmc.transform(dfz[num])
X_test = dfz
y_test = y_test.reset_index(drop=True)
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test, y_pred) | code |
104124854/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
df.head() | code |
104124854/cell_17 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
dfz = trans(X_test)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
enc_df = pd.DataFrame(ohe.transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
dfz[num] = mmc.transform(dfz[num])
df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
df.head() | code |
104124854/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
X_train = dfz
y_train = y_train.reset_index(drop=True)
model = RandomForestClassifier()
model.fit(X_train, y_train) | code |
104124854/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
X = df.drop('Transported', axis=1)
y = df['Transported'].apply(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
def trans(df):
df['Age'].fillna(df.Age.mean(), inplace=True)
df['RoomService'].fillna(df.RoomService.mean(), inplace=True)
df['FoodCourt'].fillna(df.FoodCourt.mean(), inplace=True)
df['ShoppingMall'].fillna(df.ShoppingMall.mean(), inplace=True)
df['Spa'].fillna(df.Spa.mean(), inplace=True)
df['VRDeck'].fillna(df.VRDeck.mean(), inplace=True)
df['HomePlanet'].fillna(df['HomePlanet'].mode()[0], inplace=True)
df['Destination'].fillna(df['Destination'].mode()[0], inplace=True)
df['CryoSleep'].fillna(df['CryoSleep'].mode()[0], inplace=True)
df['VIP'].fillna(df['VIP'].mode()[0], inplace=True)
df['Cabin'].fillna(df['Cabin'].mode()[0], inplace=True)
df['CryoSleep'] = df['CryoSleep'].apply(int)
df[['Deck', 'Num', 'Side']] = df['Cabin'].str.split('/', expand=True)
df['Num'] = df['Num'].astype(int)
return df
dfz = trans(X_train)
cat = ['HomePlanet', 'Deck', 'Side', 'Destination']
ohe = OneHotEncoder(drop='first')
enc_df = pd.DataFrame(ohe.fit_transform(dfz[cat]).toarray(), columns=ohe.get_feature_names_out())
dfz = dfz.reset_index(drop=True).join(enc_df)
dfz.drop(['PassengerId', 'Name', 'VIP', 'Cabin', 'HomePlanet', 'Deck', 'Side', 'Destination'], axis=1, inplace=True)
num = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mmc = MinMaxScaler()
dfz[num] = mmc.fit_transform(dfz[num])
dfz = trans(X_test)
dfz.head() | code |
32071416/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
df['Cases'] = df['Cases'].astype(int)
df = df[df['Cases'] > 0].reset_index(drop=True)
df.head() | code |
32071416/cell_4 | [
"text_html_output_1.png"
] | !curl -o $confirmed_csv $confirmed_gitpath | code |
32071416/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
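# melt() reshapes the wide time series (one column per date) into long format:
# one row per (location, Date) pair, with the cumulative count in 'Cases'.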
df.head() | code |
32071416/cell_2 | [
"text_html_output_1.png"
] | !pip install folium | code |
32071416/cell_11 | [
"text_plain_output_1.png"
] | import math
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
df['Cases'] = df['Cases'].astype(int)
df = df[df['Cases'] > 0].reset_index(drop=True)
df_alarming_cities=df.sort_values(by='Cases', ascending=False).groupby('Country/Region').head(1).reset_index(drop=True)
df_alarming_cities=df_alarming_cities.head(n=10)
df_alarming_cities
import math
total_incidents = df['Cases'].sum()
def geojsons(df):
features = []
for _, row in df.iterrows():
feature = {'type': 'Feature', 'geometry': {'type': 'Point', 'coordinates': [row['Long'], row['Lat']]}, 'properties': {'time': pd.to_datetime(row['Date'], format='%Y-%m-%d').__str__(), 'style': {'color': ''}, 'icon': 'circle', 'iconstyle': {'fillColor': 'red', 'fillOpacity': 0.8, 'stroke': 'true', 'radius': math.log(row['Cases'])}}}
features.append(feature)
return features
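# Each dict above is a GeoJSON Feature: TimestampedGeoJson reads the 'time'
# property to place the point on the animation timeline, and the marker radius is
# log-scaled by case count so large outbreaks don't swamp the map.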
start_geojson = geojsons(df)
start_geojson | code |
32071416/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32071416/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df.head() | code |
32071416/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
df.info() | code |
32071416/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
df['Cases'] = df['Cases'].astype(int)
df = df[df['Cases'] > 0].reset_index(drop=True)
df_alarming_cities = df.sort_values(by='Cases', ascending=False).groupby('Country/Region').head(1).reset_index(drop=True)
df_alarming_cities = df_alarming_cities.head(n=10)
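# sort_values + groupby().head(1) keeps each country's single highest-case row
# (groupby preserves the sorted order), so head(10) then gives the ten
# worst-affected countries.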
df_alarming_cities | code |
32071416/cell_12 | [
"text_html_output_1.png"
] | from folium.plugins import TimestampedGeoJson
import folium
import math
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='Date', value_name='Cases')
df['Date'] = df['Date'].str.replace(r'(\d+)/(\d+)/(\d+)', r'20\3-\1-\2', regex=True)
df['Date'] = pd.to_datetime(df['Date'])
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
df['Cases'] = df['Cases'].astype(int)
df = df[df['Cases'] > 0].reset_index(drop=True)
df_alarming_cities=df.sort_values(by='Cases', ascending=False).groupby('Country/Region').head(1).reset_index(drop=True)
df_alarming_cities=df_alarming_cities.head(n=10)
df_alarming_cities
import math
total_incidents = df['Cases'].sum()
def geojsons(df):
features = []
for _, row in df.iterrows():
feature = {'type': 'Feature', 'geometry': {'type': 'Point', 'coordinates': [row['Long'], row['Lat']]}, 'properties': {'time': pd.to_datetime(row['Date'], format='%Y-%m-%d').__str__(), 'style': {'color': ''}, 'icon': 'circle', 'iconstyle': {'fillColor': 'red', 'fillOpacity': 0.8, 'stroke': 'true', 'radius': math.log(row['Cases'])}}}
features.append(feature)
return features
start_geojson = geojsons(df)
start_geojson
import folium
from folium.plugins import TimestampedGeoJson
m = folium.Map(location=[50, 30], zoom_start=2, tiles='Stamen Toner')
for _, row in df_alarming_cities.iterrows():
folium.Marker(location=[row['Lat'], row['Long']], icon=folium.Icon(color='black', icon='ambulance', prefix='fa'), popup=row['Province/State']).add_to(m)
TimestampedGeoJson(start_geojson, period='P1D', duration='PT1M', transition_time=2000, auto_play=True).add_to(m)
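# period='P1D' (an ISO 8601 duration) advances the animation one day per frame;
# duration='PT1M' controls how long each point remains visible after its timestamp.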
m | code |
32071416/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
confirmed_csv = 'corona_confirmed.csv'
confirmed_gitpath = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
import pandas as pd
df = pd.read_csv(confirmed_csv)
df.head() | code |
16162621/cell_42 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction:
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.head() | code |
16162621/cell_63 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason']
df_hour = df.groupby(by=['Day of Week', 'Hour']).count()['Reason'].unstack()
df_month = df.groupby(by=['Day of Week', 'Month']).count()['Reason'].unstack()
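# unstack() pivots the inner index level (Hour or Month) into columns, producing a
# Day-of-Week x Hour (or x Month) count matrix ready for the heatmaps below.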
df_month.head() | code |
16162621/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
df['Departments'].value_counts() | code |
16162621/cell_57 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason']
df_hour = df.groupby(by=['Day of Week', 'Hour']).count()['Reason'].unstack()
df_hour.head() | code |
16162621/cell_34 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
# 'Day of Week' is derived in a cell not included in this dump; reconstruct it first
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
sns.countplot(x='Day of Week', data=df, hue='Reason', palette='viridis')
sns.despine(left=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show() | code |
16162621/cell_44 | [
"text_html_output_1.png"
] | from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
import plotly.plotly as py
import plotly.graph_objs as go
init_notebook_mode(connected=True)
cf.go_offline() | code |
16162621/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.countplot(x='Reason', data=df, palette='magma')
sns.despine(left=True) | code |
16162621/cell_55 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason'] | code |
16162621/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df.head() | code |
16162621/cell_40 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
sns.despine(left=True)
sns.despine(left=True)
# 'Month' is derived in a cell not included in this dump; reconstruct it first
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
Month_grouping['twp'].plot()
plt.show() | code |
16162621/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
df.head() | code |
16162621/cell_26 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
type(df['timeStamp'].iloc[0]) | code |
16162621/cell_65 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
sns.despine(left=True)
sns.despine(left=True)
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason']
df_hour = df.groupby(by=['Day of Week', 'Hour']).count()['Reason'].unstack()
sns.clustermap(df_hour, cmap='coolwarm', linecolor='white', linewidths=1)
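# clustermap is a heatmap whose rows and columns are reordered by hierarchical
# clustering, grouping days/hours with similar call patterns next to each other.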
df_month = df.groupby(by=['Day of Week', 'Month']).count()['Reason'].unstack()
plt.figure(figsize=(15, 7))
sns.heatmap(df_month, cmap='magma', linecolor='white', linewidths=1)
plt.show() | code |
16162621/cell_48 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction:
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df[df['Reason'] == 'EMS'].groupby('Date').count()['lat'].iplot(kind='line') | code |
16162621/cell_61 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
sns.despine(left=True)
sns.despine(left=True)
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason']
df_hour = df.groupby(by=['Day of Week', 'Hour']).count()['Reason'].unstack()
sns.clustermap(df_hour, cmap='coolwarm', linecolor='white', linewidths=1)
plt.show() | code |
16162621/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
df['Reason'].value_counts().head(1) | code |
16162621/cell_50 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction:
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df[df['Reason'] == 'Fire'].groupby('Date').count()['lat'].iplot(kind='line') | code |
16162621/cell_52 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction:
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df[df['Reason'] == 'Traffic'].groupby('Date').count()['lat'].iplot(kind='line') | code |
16162621/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16162621/cell_45 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction:
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby('Date').count()['lat'].iplot(kind='line') | code |
16162621/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
df.head() | code |
16162621/cell_59 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
sns.despine(left=True)
sns.despine(left=True)
# the cell that derived these time features is not included in this dump; a
# plausible reconstruction (not the notebook's verbatim code):
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Hour'] = df['timeStamp'].apply(lambda t: t.hour)
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
df['Day of Week'] = df['timeStamp'].apply(lambda t: t.day_name())
Month_grouping = df.groupby('Month').count()
df['Date'] = df['timeStamp'].apply(lambda t: t.date())
df.groupby(by=['Day of Week', 'Hour']).count()['Reason']
df_hour = df.groupby(by=['Day of Week', 'Hour']).count()['Reason'].unstack()
plt.figure(figsize=(15, 7))
sns.heatmap(df_hour, cmap='magma', linecolor='white', linewidths=1)
plt.show() | code |
16162621/cell_8 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['zip'].value_counts().head(5) | code |
16162621/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
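# 'title' strings take the form "EMS: BACK PAINS/INJURY"; split(':')[0] extracts
# the category before the colon (EMS, Fire or Traffic) as the call 'Reason'.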
df.head() | code |
16162621/cell_38 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
# 'Month' is derived in a cell not included in this dump; reconstruct it first
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
Month_grouping = df.groupby('Month').count()
Month_grouping.head() | code |
16162621/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
df.head() | code |
16162621/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
type(df['timeStamp'].iloc[0]) | code |
16162621/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['twp'].value_counts().head(5) | code |
16162621/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/911.csv')
df['title'].nunique() | code |
16162621/cell_36 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/911.csv')
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0])
df['Departments'] = df['title'].apply(lambda title: title.split(':')[1])
sns.despine(left=True)
sns.despine(left=True)
# 'Month' is derived in a cell not included in this dump; reconstruct it first
df['timeStamp'] = pd.to_datetime(df['timeStamp'])
df['Month'] = df['timeStamp'].apply(lambda t: t.month)
sns.countplot(x='Month', data=df, hue='Reason', palette='magma')
sns.despine(left=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show() | code |
325103/cell_4 | [
"text_plain_output_1.png"
] | from sklearn import linear_model, svm, metrics
from sklearn import linear_model, svm, metrics
classifier = linear_model.SGDClassifier(n_iter=100, n_jobs=6, penalty='l1')
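# A linear classifier trained with stochastic gradient descent; penalty='l1'
# encourages sparse weights, and n_iter (renamed max_iter in later scikit-learn
# releases) sets the number of passes over the training data.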
print(classifier) | code |
325103/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
325103/cell_5 | [
"text_plain_output_1.png"
] | from sklearn import linear_model, svm, metrics
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/train.csv')
target = dataset.iloc[:, 0].values.ravel()  # first column holds the label
train = dataset.iloc[:, 1:].values
test = pd.read_csv('../input/test.csv').values
from sklearn import linear_model, svm, metrics
classifier = linear_model.SGDClassifier(n_iter=100, n_jobs=6, penalty='l1')
classifier.fit(train, target) | code |
90118084/cell_9 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
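# The raw WHO cells look like "12.3 [10.1-14.5]" (estimate plus uncertainty
# interval); splitting on '[' keeps only the point estimate as a float.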
data.sample(5) | code |
90118084/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
countries['Vanuatu'].head() | code |
90118084/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
countries['Mali'].head() | code |
90118084/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
data.head() | code |
90118084/cell_29 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='white', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    # the display calls are missing from this dump; a minimal reconstruction
    # (figure size and interpolation are assumptions):
    plt.figure(figsize=(18, 6))
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis('off')
    plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_gender_dying_corr = {}
country_gender_suicide_corr = {}
for val, df in data.groupby('Country'):
corr_gender_dying = df['ProbDyingMale'].corr(df['ProbDyingFemale'])
corr_gender_suicide = df['SuicideMale'].corr(df['SuicideFemale'])
country_gender_dying_corr[val] = corr_gender_dying
country_gender_suicide_corr[val] = corr_gender_suicide
visualize_word_counts(data.groupby('Country').mean()['SuicideBoth'].to_dict()) | code |
90118084/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_gender_dying_corr = {}
country_gender_suicide_corr = {}
for val, df in data.groupby('Country'):
corr_gender_dying = df['ProbDyingMale'].corr(df['ProbDyingFemale'])
corr_gender_suicide = df['SuicideMale'].corr(df['SuicideFemale'])
country_gender_dying_corr[val] = corr_gender_dying
country_gender_suicide_corr[val] = corr_gender_suicide
if val == 'Turkey':
display(df) | code |
90118084/cell_19 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
def repeated_measures_effect_size(country, col1, col2):
col1, col2 = (countries[country][col1], countries[country][col2])
m1, m2 = (np.mean(col1), np.mean(col2))
s1, s2 = (np.std(col1), np.std(col2))
r = col1.corr(col2)
s_z = np.sqrt(s1 ** 2 + s2 ** 2 - 2 * r * s1 * s2)
s_rm = s_z / np.sqrt(2 * (1 - r))
return (m1 - m2) / s_rm
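# Repeated-measures Cohen's d: s_z is the SD of the paired differences (via the
# correlated-variances identity), and dividing by sqrt(2*(1 - r)) rescales it back
# to original score units before standardizing the mean difference.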
effect_sizes_dying = {c: repeated_measures_effect_size(c, 'ProbDyingMale', 'ProbDyingFemale') for c in countries}
effect_sizes_dying = dict(sorted(effect_sizes_dying.items(), key=lambda x: x[1]))
for c in list(effect_sizes_dying.keys())[:5]:
print(c) | code |
90118084/cell_24 | [
"text_plain_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def visualize_word_counts(counts):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='white', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    # the display calls are missing from this dump; a minimal reconstruction
    # (figure size and interpolation are assumptions):
    plt.figure(figsize=(18, 6))
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis('off')
    plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
def repeated_measures_effect_size(country, col1, col2):
col1, col2 = (countries[country][col1], countries[country][col2])
m1, m2 = (np.mean(col1), np.mean(col2))
s1, s2 = (np.std(col1), np.std(col2))
r = col1.corr(col2)
s_z = np.sqrt(s1 ** 2 + s2 ** 2 - 2 * r * s1 * s2)
s_rm = s_z / np.sqrt(2 * (1 - r))
return (m1 - m2) / s_rm
# `effect_sizes` is referenced below but never defined in this cell; recompute it
# here with the repeated-measures effect size defined above (the column choice
# mirrors cell_19 and is an assumption about the missing intermediate cell)
effect_sizes = {c: repeated_measures_effect_size(c, 'ProbDyingMale', 'ProbDyingFemale') for c in countries}
plt.figure(figsize=(15, 5))
plt.hist(list(effect_sizes.values()), bins=30)
plt.xticks(np.arange(-10, 41, 5))
plt.grid()
plt.show() | code |
90118084/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
countries['Germany'].head() | code |