path stringlengths 13-17 | screenshot_names sequencelengths 1-873 | code stringlengths 0-40.4k | cell_type stringclasses 1 value |
---|---|---|---|
18124779/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x='date', y='rider_performance', rot=0)
rider_performance_graph.set_xlabel('Date')
rider_performance_graph.set_ylabel('Rider performance') | code |
18124779/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18124779/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider performance")
horse_performance_graph = df.plot.bar(x='date', y='horse_performance', rot=0)
horse_performance_graph.set_xlabel('Date')
horse_performance_graph.set_ylabel('Horse performance') | code |
18124779/cell_8 | [
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider performance")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse performance")
avg_performance_graph = df.plot.bar(x='date', y='avg_performance', rot=0)
avg_performance_graph.set_xlabel('Date')
avg_performance_graph.set_ylabel('Average performance') | code |
18124779/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df | code |
18124779/cell_10 | [
"text_html_output_1.png"
] | from pandas import DataFrame
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider performance")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse performance")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average performance")
performance_df = pd.DataFrame({'Rider performance': df["rider_performance"],
'Horse performance': df["horse_performance"]})
performance_graph_comparison1 = performance_df.plot.bar(rot=0)
performance_df2 = pd.DataFrame({'Rider performance': df['rider_performance'], 'Horse performance': df['horse_performance'], 'Average performance': df['avg_performance']})
performance_graph_comparison2 = performance_df2.plot.bar(rot=0) | code |
18124779/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x='date', y='km', rot=0)
km_graph.set_xlabel('Date')
km_graph.set_ylabel('Km') | code |
130011284/cell_6 | [
"text_plain_output_1.png"
] | import pandas
train = pandas.read_csv('/kaggle/input/loan-status-binary-classification/train.csv')
test = pandas.read_csv('/kaggle/input/loan-status-binary-classification/test.csv')
for column in train.columns:
print(column, train[column].isnull().sum()) | code |
130011284/cell_3 | [
"text_plain_output_1.png"
] | import pandas
import numpy
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.linear_model import LogisticRegression | code |
130011284/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
import numpy
import pandas
train = pandas.read_csv('/kaggle/input/loan-status-binary-classification/train.csv')
test = pandas.read_csv('/kaggle/input/loan-status-binary-classification/test.csv')
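# engineered features below: combined household income, the applicant's share of it, loan-to-income and loan-to-term ratios, and an income*term product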
train['Total_Income'] = train['Applicant_Income'] + train['Coapplicant_Income']
test['Total_Income'] = test['Applicant_Income'] + test['Coapplicant_Income']
train['Applicant_Proportion'] = train['Applicant_Income'] / train['Total_Income']
test['Applicant_Proportion'] = test['Applicant_Income'] / test['Total_Income']
train['Loan_Income_Ratio'] = train['Loan_Amount'] / train['Total_Income']
test['Loan_Income_Ratio'] = test['Loan_Amount'] / test['Total_Income']
train['Loan_Term_Ratio'] = train['Loan_Amount'] / train['Term']
test['Loan_Term_Ratio'] = test['Loan_Amount'] / test['Term']
train['Income_Term_Product'] = train['Total_Income'] * train['Term']
test['Income_Term_Product'] = test['Total_Income'] * test['Term']
def Education_Area_Interaction(dataFrame):
Eduction_Encode = pandas.get_dummies(dataFrame['Education'], prefix='Education')
Area_Encode = pandas.get_dummies(dataFrame['Area'], prefix='Area')
for Education_Column in Eduction_Encode.columns:
for Area_Column in Area_Encode.columns:
dataFrame[f'{Education_Column}_X_{Area_Column}'] = Eduction_Encode[Education_Column] * Area_Encode[Area_Column]
return dataFrame
train = Education_Area_Interaction(train)
test = Education_Area_Interaction(test)
def Dependents_Education_Interaction(dataFrame):
Dependents_Encode = pandas.get_dummies(dataFrame['Dependents'], prefix='Dependents')
Education_Encode = pandas.get_dummies(dataFrame['Education'], prefix='Education')
for Dependents_Column in Dependents_Encode.columns:
for Education_Column in Education_Encode.columns:
dataFrame[f'{Dependents_Column}_X_{Education_Column}'] = Dependents_Encode[Dependents_Column] * Education_Encode[Education_Column]
return dataFrame
train = Dependents_Education_Interaction(train)
test = Dependents_Education_Interaction(test)
categorical_variables = ['Gender', 'Married', 'Dependents', 'Education', 'Self_Employed', 'Credit_History', 'Area'] + [column for column in train.columns if '_X_' in column]
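# one-hot encode the categoricals on train and test concatenated, so both sets share an identical column layout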
encoder = OneHotEncoder()
all_data = pandas.concat([train, test], axis=0)
all_data_encoded = encoder.fit_transform(all_data[categorical_variables])
train_encoded = all_data_encoded[:train.shape[0]]
test_encoded = all_data_encoded[train.shape[0]:]
numerical_variables = ['Applicant_Income', 'Coapplicant_Income', 'Loan_Amount', 'Term', 'Total_Income', 'Applicant_Proportion', 'Loan_Income_Ratio', 'Loan_Term_Ratio', 'Income_Term_Product']
scaler = MinMaxScaler()
all_data_scaled = scaler.fit_transform(all_data[numerical_variables])
train_scaled = all_data_scaled[:train.shape[0]]
test_scaled = all_data_scaled[train.shape[0]:]
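# final design matrices: densified one-hot categoricals stacked next to the min-max scaled numeric features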
train_processed = numpy.hstack((train_encoded.toarray(), train_scaled))
test_processed = numpy.hstack((test_encoded.toarray(), test_scaled))
model = LogisticRegression()
model.fit(train_processed, train['Status'])
print(model.score(train_processed, train['Status']))
predictions = model.predict(test_processed)
submission = pandas.DataFrame({'id': test['id'], 'Status': predictions})
submission.to_csv('/kaggle/working/submission.csv', index=False) | code |
50233625/cell_15 | [
"text_html_output_2.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
data_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='ISO-8859-1', low_memory=False)
data_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', low_memory=False)
data_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv', low_memory=False)
data_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
def consolidate_country_2017_to_2020(x):
if (x == 'United States') | (x == 'United States of America'):
return 'USA'
elif (x == 'United Kingdom of Great Britain and Northern Ireland') | (x == 'United Kingdom'):
return 'UK'
elif x == "People 's Republic of China":
return 'China'
elif x == 'Iran, Islamic Republic of...':
return 'Iran'
else:
return x
def consolidate_salary_2017(x):
try:
x = int(x.replace(',', ''))
if x < 30000:
return '0-30k'
if x < 50000:
return '30-50k'
elif x < 100000:
return '50-100k'
elif x < 150000:
return '100-150k'
elif x < 200000:
return '150-200k'
elif x < 250000:
return '200-250k'
elif x < 500000:
return '250-500k'
else:
return '$500k+'
except:
return 'No compensation disclosed'
def consolidate_salary_2018(x):
if (x == '0-10,000') | (x == '10-20,000') | (x == '20-30,000'):
return '0-30k'
elif (x == '30-40,000') | (x == '40-50,000'):
return '30-50k'
elif (x == '50-60,000') | (x == '60-70,000') | (x == '70-80,000') | (x == '80-90,000') | (x == '90-100,000'):
return '50-100k'
elif (x == '100-125,000') | (x == '125-150,000'):
return '100-150k'
elif x == '150-200,000':
return '150-200k'
elif x == '200-250,000':
return '200-250k'
elif (x == '250-300,000') | (x == '300-400,000') | (x == '400-500,000'):
return '250-500k'
elif x == '500,000+':
return '$500k+'
else:
return 'No compensation disclosed'
def consolidate_salary_2019_2020(x):
if (x == '$0-999') | (x == '1,000-1,999') | (x == '2,000-2,999') | (x == '3,000-3,999') | (x == '4,000-4,999') | (x == '5,000-7,499') | (x == '7,500-9,999') | (x == '10,000-14,999') | (x == '15,000-19,999') | (x == '20,000-24,999') | (x == '25,000-29,999'):
return '0-30k'
elif (x == '30,000-39,999') | (x == '40,000-49,999'):
return '30-50k'
elif (x == '50,000-59,999 ') | (x == '60,000-69,999') | (x == '70,000-79,999') | (x == '80,000-89,999') | (x == '90,000-99,999'):
return '50-100k'
elif (x == '100,000-124,999') | (x == '125,000-149,999'):
return '100-150k'
elif x == '150,000-199,999':
return '150-200k'
elif x == '200,000-249,999':
return '200-250k'
elif (x == '250,000-299,999') | (x == '300,000-500,000'):
return '250-500k'
elif x == '> $500,000':
return '$500k+'
else:
return 'No compensation disclosed'
def consolidate_sex_2017_to_2020(x):
if (x == 'Prefer not to say ') | (x == 'Prefer to self-describe') | (x == 'Nonbinary') | (x == 'A different identity') | (x == 'Prefer not to say') | (x == 'Non-binary, genderqueer, or gender non-conforming'):
return 'Other'
elif x == 'Man':
return 'Male'
elif x == 'Woman':
return 'Female'
else:
return x
def consolidate_job_titles_2017_to_2020(x):
if (x == 'Data Scientist') | (x == 'Machine Learning Engineer') | (x == 'Predictive Modeler'):
return 'Data science/ML engineers'
elif (x == 'Software Engineer') | (x == 'Software Developer/Software Engineer') | (x == 'Programmer') | (x == 'Developer Advocate') | (x == 'Computer Scientist'):
return 'Software developers'
elif (x == 'Data Analyst') | (x == 'Business Analyst') | (x == 'Data Miner') | (x == 'Marketing Analyst') | (x == 'Data Journalist'):
return 'Data/business analysts'
elif (x == 'DBA/Database Engineer') | (x == 'Data Engineer') | (x == 'Data Miner') | (x == 'Marketing Analyst') | (x == 'Data Journalist'):
return 'Data engineers'
elif (x == 'Student') | (x == 'Researcher'):
return 'Students'
else:
return 'Other job fields'
def consolidate_age_2017(x):
if x < 25:
return 'Under 25'
elif x < 35:
return '25-35'
elif x < 45:
return '35-45'
elif x < 60:
return '45-60'
elif x < 70:
return '60-69'
else:
return '70+'
def consolidate_age_2018_to_2020(x):
if (x == '18-21') | (x == '22-24'):
return 'Under 25'
elif (x == '25-29') | (x == '30-34'):
return '25-35'
elif (x == '35-39') | (x == '40-44'):
return '35-45'
elif (x == '45-49') | (x == '50-54') | (x == '55-59'):
return '45-60'
else:
return x
def consolidate_vizmodules_2018_to_2020(x):
if x == 'MatplotlibSeaborn':
return 'Matplotlib & Seaborn'
elif (x == 'Plotly / Plotly Express') | (x == 'Plotly'):
return 'Plotly Alone'
elif (x == 'MatplotlibPlotly / Plotly Express') | (x == 'MatplotlibPlotly'):
return 'Matplotlib & Plotly'
elif (x == 'MatplotlibSeabornPlotly / Plotly ExpressBokeh') | (x == 'MatplotlibPlotly / Plotly ExpressBokehSeaborn') | (x == 'MatplotlibPlotlyBokehSeaborn'):
return 'All 4 modules'
elif x == 'Bokeh':
return 'Bokeh Alone'
elif x == 'MatplotlibBokeh':
return 'Matplotlib & Bokeh'
elif x == 'Seaborn':
return 'Seaborn Alone'
elif x == 'Matplotlib':
return 'Matplotlib Alone'
elif x == 'Shiny':
return 'Shiny Alone'
elif (x == 'Ggplot / ggplot2') | (x == 'ggplot2'):
return 'Ggplot Alone'
elif (x == 'Ggplot / ggplot2Shiny') | (x == 'ggplot2Shiny'):
return 'Both Modules'
else:
return x
def consolidate_mlmodules_2018_to_2020(x):
if (x == 'Scikit-learn') | (x == 'Scikit-Learn') | (x == 'Scikit-LearnrandomForest') | (x == 'Scikit-learnRandomForest') | (x == 'Scikit-learnCaret') | (x == 'randomForest') | (x == 'Caret') | (x == 'Scikit-learnRandomForestCaret') | (x == 'CaretrandomForest') | (x == 'RandomForest') | (x == 'Scikit-LearnCaret') | (x == 'RandomForestCaret') | (x == 'Scikit-LearnCaretrandomForest'):
return 'Classic ML modules'
elif (x == 'TensorFlow') | (x == 'TensorFlowKeras') | (x == 'PyTorch') | (x == 'TensorFlowKerasPyTorch') | (x == 'Keras') | (x == 'TensorFlowPyTorch') | (x == 'KerasPyTorch') | (x == 'TensorFlowKerasPyTorchFast.ai') | (x == 'TensorFlowKerasPyTorchFastai') | (x == 'PyTorchFast.ai') | (x == 'TensorFlowKerasFastai') | (x == 'Fast.ai') | (x == 'TensorFlowKerasFast.ai') | (x == 'KerasPyTorchFast.ai') | (x == 'TensorFlowPyTorchFast.ai') | (x == 'Fastai') | (x == 'PyTorchFastai') | (x == 'KerasFast.ai') | (x == 'TensorFlowPyTorchFastai') | (x == 'TensorFlowFast.ai ') | (x == 'KerasPyTorchFastai') | (x == 'TensorFlowFastai') | (x == 'KerasFastai') | (x == 'TensorFlowFast.ai') | (x == 'Xgboost') | (x == 'XgboostLightGBM') | (x == 'Xgboostlightgbm') | (x == 'Xgboostlightgbmcatboost') | (x == 'XgboostLightGBMCatBoost') | (x == 'LightGBM') | (x == 'Xgboostcatboost') | (x == 'XgboostCatBoost') | (x == 'lightgbm') | (x == 'CatBoost') | (x == 'catboost') | (x == 'LightGBMCatBoost') | (x == 'lightgbmcatboost') | (x == 'TensorFlowKerasXgboost') | (x == 'TensorFlowKerasPyTorchXgboost') | (x == 'TensorFlowKerasXgboostLightGBM') | (x == 'TensorFlowXgboost') | (x == 'KerasXgboost') | (x == 'TensorFlowKerasPyTorchXgboostLightGBM') | (x == 'TensorFlowKerasXgboostlightgbm') | (x == 'PyTorchXgboost') | (x == 'TensorFlowKerasXgboostlightgbmcatboost') | (x == 'KerasXgboostLightGBM') | (x == 'KerasPyTorchXgboost') | (x == 'KerasPyTorchXgboostLightGBM') | (x == 'PyTorchLightGBM') | (x == 'TensorFlowKerasPyTorchFast.aiXgboostLightGBM ') | (x == 'TensorFlowKerasPyTorchXgboostLightGBMCatBoost') | (x == 'PyTorchXgboostLightGBM') | (x == 'TensorFlowKerasXgboostLightGBMCatBoost') | (x == 'TensorFlowKerasPyTorchXgboostlightgbm') | (x == 'TensorFlowPyTorchXgboost') | (x == 'TensorFlowKerasPyTorchXgboostlightgbmcatboost'):
return 'Deep Learning & Boosting modules'
else:
return x
# Merge all survey data from all years into one dataframe and unify responses as much as possible
consolidated_data = pd.DataFrame()
year_2017 = pd.DataFrame()
year_2018 = pd.DataFrame()
year_2019 = pd.DataFrame()
year_2020 = pd.DataFrame()
#'17
year_2017['Salary'] = data_2017.CompensationAmount.apply(lambda x: consolidate_salary_2017(x))
year_2017['Country'] = data_2017.Country.apply(lambda x: consolidate_country_2017_to_2020(x))
year_2017['Age'] = data_2017.Age.drop(0).apply(lambda x: consolidate_age_2017(x))
year_2017['Job_field'] = data_2017.CurrentJobTitleSelect.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2017['Sex'] = data_2017.GenderSelect.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2017['Year'] = 2017
#'18
year_2018['Salary'] = data_2018.Q9.apply(lambda x: consolidate_salary_2018(x))
year_2018['Country'] = data_2018.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2018['Age'] = data_2018.Q2.drop(0).replace({'70-79' : '70+', '80+' : '70+'})
year_2018['Age'] = year_2018.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2018['Job_field'] = data_2018.Q6.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2018['Sex'] = data_2018.Q1.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2018['Visual_modules'] = (data_2018['Q21_Part_1'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_2'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_3'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_5'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_6'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_7'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_9'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_10'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_11'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_9'].fillna('').drop(0).str.strip(' '))
year_2018['Visual_modules'] = year_2018.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2018['ML_modules'] = (data_2018['Q19_Part_1'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_2'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_3'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_5'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_6'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_7'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_9'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_10'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_11'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_13'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_14'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_15'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_16'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_17'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_18'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_19'].fillna('').drop(0).str.strip(' '))
year_2018['ML_modules'] = year_2018.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2018['Year'] = 2018
#'19
year_2019['Salary'] = data_2019.Q10.apply(lambda x: consolidate_salary_2019_2020(x))
year_2019['Country'] = data_2019.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2019['Age'] = data_2019.Q1.drop(0)
year_2019['Age'] = year_2019.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2019['Job_field'] = data_2019.Q5.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2019['Sex'] = data_2019.Q2.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2019['Visual_modules'] = (data_2019['Q20_Part_1'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_2'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_3'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q20_Part_5'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_6'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_7'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q20_Part_9'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_10'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_11'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_12'].fillna('').drop(0).str.strip(' '))
year_2019['Visual_modules'] = year_2019.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2019['ML_modules'] = (data_2019['Q28_Part_1'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_2'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_3'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q28_Part_5'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_6'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_7'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q28_Part_9'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_10'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_11'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_12'].fillna('').drop(0).str.strip(' '))
year_2019['ML_modules'] = year_2019.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2019['Year'] = 2019
#'20
year_2020['Salary'] = data_2020.Q24.apply(lambda x: consolidate_salary_2019_2020(x))
year_2020['Country'] = data_2020.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2020['Age'] = data_2020.Q1.drop(0)
year_2020['Age'] = year_2020.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2020['Job_field'] = data_2020.Q5.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2020['Sex'] = data_2020.Q2.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2020['Visual_modules'] = (data_2020['Q14_Part_1'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_2'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_3'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q14_Part_5'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_6'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_7'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q14_Part_9'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_10'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_11'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_OTHER'].fillna('').drop(0).str.strip(' '))
year_2020['Visual_modules'] = year_2020.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2020['ML_modules'] = (data_2020['Q16_Part_1'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_2'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_3'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_5'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_6'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_7'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_9'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_10'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_11'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_13'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_14'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_15'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_OTHER'].fillna('').drop(0).str.strip(' '))
year_2020['ML_modules'] = year_2020.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2020['Year'] = 2020
#consolidating the dataframe
consolidated_data = year_2017
consolidated_data = consolidated_data.append(year_2018)
consolidated_data = consolidated_data.append(year_2019)
consolidated_data = consolidated_data.append(year_2020)
consolidated_data = consolidated_data.reset_index(drop=True)
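# top-10 respondent countries for each survey year, shown as a 2x2 grid of bar charts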
sub_2017 = year_2017['Country'].value_counts().sort_values(ascending=False).head(10).reset_index().round(1)
sub_2018 = year_2018['Country'].value_counts().sort_values(ascending=False).head(10).reset_index().round(1)
sub_2019 = year_2019['Country'].value_counts().sort_values(ascending=False).head(10).reset_index().round(1)
sub_2020 = year_2020['Country'].value_counts().sort_values(ascending=False).head(10).reset_index().round(1)
fig = make_subplots(rows=2, cols=2, subplot_titles=('2017', '2018', '2019', '2020'))
fig.add_trace(go.Bar(x=sub_2017['index'], y=sub_2017['Country']), row=1, col=1)
fig.add_trace(go.Bar(x=sub_2018['index'], y=sub_2018['Country']), row=1, col=2)
fig.add_trace(go.Bar(x=sub_2019['index'], y=sub_2019['Country']), row=2, col=1)
fig.add_trace(go.Bar(x=sub_2020['index'], y=sub_2020['Country']), row=2, col=2)
fig.update_layout(title_text='Top 10 countries 2017-20 (total number of respondents)', title_x=0.5)
fig.update_layout(showlegend=False)
fig.show() | code |
50233625/cell_17 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
data_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='ISO-8859-1', low_memory=False)
data_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', low_memory=False)
data_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv', low_memory=False)
data_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
def consolidate_country_2017_to_2020(x):
if (x == 'United States') | (x == 'United States of America'):
return 'USA'
elif (x == 'United Kingdom of Great Britain and Northern Ireland') | (x == 'United Kingdom'):
return 'UK'
elif x == "People 's Republic of China":
return 'China'
elif x == 'Iran, Islamic Republic of...':
return 'Iran'
else:
return x
def consolidate_salary_2017(x):
try:
x = int(x.replace(',', ''))
if x < 30000:
return '0-30k'
if x < 50000:
return '30-50k'
elif x < 100000:
return '50-100k'
elif x < 150000:
return '100-150k'
elif x < 200000:
return '150-200k'
elif x < 250000:
return '200-250k'
elif x < 500000:
return '250-500k'
else:
return '$500k+'
except:
return 'No compensation disclosed'
def consolidate_salary_2018(x):
if (x == '0-10,000') | (x == '10-20,000') | (x == '20-30,000'):
return '0-30k'
elif (x == '30-40,000') | (x == '40-50,000'):
return '30-50k'
elif (x == '50-60,000') | (x == '60-70,000') | (x == '70-80,000') | (x == '80-90,000') | (x == '90-100,000'):
return '50-100k'
elif (x == '100-125,000') | (x == '125-150,000'):
return '100-150k'
elif x == '150-200,000':
return '150-200k'
elif x == '200-250,000':
return '200-250k'
elif (x == '250-300,000') | (x == '300-400,000') | (x == '400-500,000'):
return '250-500k'
elif x == '500,000+':
return '$500k+'
else:
return 'No compensation disclosed'
def consolidate_salary_2019_2020(x):
if (x == '$0-999') | (x == '1,000-1,999') | (x == '2,000-2,999') | (x == '3,000-3,999') | (x == '4,000-4,999') | (x == '5,000-7,499') | (x == '7,500-9,999') | (x == '10,000-14,999') | (x == '15,000-19,999') | (x == '20,000-24,999') | (x == '25,000-29,999'):
return '0-30k'
elif (x == '30,000-39,999') | (x == '40,000-49,999'):
return '30-50k'
elif (x == '50,000-59,999 ') | (x == '60,000-69,999') | (x == '70,000-79,999') | (x == '80,000-89,999') | (x == '90,000-99,999'):
return '50-100k'
elif (x == '100,000-124,999') | (x == '125,000-149,999'):
return '100-150k'
elif x == '150,000-199,999':
return '150-200k'
elif x == '200,000-249,999':
return '200-250k'
elif (x == '250,000-299,999') | (x == '300,000-500,000'):
return '250-500k'
elif x == '> $500,000':
return '$500k+'
else:
return 'No compensation disclosed'
def consolidate_sex_2017_to_2020(x):
if (x == 'Prefer not to say ') | (x == 'Prefer to self-describe') | (x == 'Nonbinary') | (x == 'A different identity') | (x == 'Prefer not to say') | (x == 'Non-binary, genderqueer, or gender non-conforming'):
return 'Other'
elif x == 'Man':
return 'Male'
elif x == 'Woman':
return 'Female'
else:
return x
def consolidate_job_titles_2017_to_2020(x):
if (x == 'Data Scientist') | (x == 'Machine Learning Engineer') | (x == 'Predictive Modeler'):
return 'Data science/ML engineers'
elif (x == 'Software Engineer') | (x == 'Software Developer/Software Engineer') | (x == 'Programmer') | (x == 'Developer Advocate') | (x == 'Computer Scientist'):
return 'Software developers'
elif (x == 'Data Analyst') | (x == 'Business Analyst') | (x == 'Data Miner') | (x == 'Marketing Analyst') | (x == 'Data Journalist'):
return 'Data/business analysts'
elif (x == 'DBA/Database Engineer') | (x == 'Data Engineer') | (x == 'Data Miner') | (x == 'Marketing Analyst') | (x == 'Data Journalist'):
return 'Data engineers'
elif (x == 'Student') | (x == 'Researcher'):
return 'Students'
else:
return 'Other job fields'
def consolidate_age_2017(x):
if x < 25:
return 'Under 25'
elif x < 35:
return '25-35'
elif x < 45:
return '35-45'
elif x < 60:
return '45-60'
elif x < 70:
return '60-69'
else:
return '70+'
def consolidate_age_2018_to_2020(x):
if (x == '18-21') | (x == '22-24'):
return 'Under 25'
elif (x == '25-29') | (x == '30-34'):
return '25-35'
elif (x == '35-39') | (x == '40-44'):
return '35-45'
elif (x == '45-49') | (x == '50-54') | (x == '55-59'):
return '45-60'
else:
return x
def consolidate_vizmodules_2018_to_2020(x):
if x == 'MatplotlibSeaborn':
return 'Matplotlib & Seaborn'
elif (x == 'Plotly / Plotly Express') | (x == 'Plotly'):
return 'Plotly Alone'
elif (x == 'MatplotlibPlotly / Plotly Express') | (x == 'MatplotlibPlotly'):
return 'Matplotlib & Plotly'
elif (x == 'MatplotlibSeabornPlotly / Plotly ExpressBokeh') | (x == 'MatplotlibPlotly / Plotly ExpressBokehSeaborn') | (x == 'MatplotlibPlotlyBokehSeaborn'):
return 'All 4 modules'
elif x == 'Bokeh':
return 'Bokeh Alone'
elif x == 'MatplotlibBokeh':
return 'Matplotlib & Bokeh'
elif x == 'Seaborn':
return 'Seaborn Alone'
elif x == 'Matplotlib':
return 'Matplotlib Alone'
elif x == 'Shiny':
return 'Shiny Alone'
elif (x == 'Ggplot / ggplot2') | (x == 'ggplot2'):
return 'Ggplot Alone'
elif (x == 'Ggplot / ggplot2Shiny') | (x == 'ggplot2Shiny'):
return 'Both Modules'
else:
return x
def consolidate_mlmodules_2018_to_2020(x):
if (x == 'Scikit-learn') | (x == 'Scikit-Learn') | (x == 'Scikit-LearnrandomForest') | (x == 'Scikit-learnRandomForest') | (x == 'Scikit-learnCaret') | (x == 'randomForest') | (x == 'Caret') | (x == 'Scikit-learnRandomForestCaret') | (x == 'CaretrandomForest') | (x == 'RandomForest') | (x == 'Scikit-LearnCaret') | (x == 'RandomForestCaret') | (x == 'Scikit-LearnCaretrandomForest'):
return 'Classic ML modules'
elif (x == 'TensorFlow') | (x == 'TensorFlowKeras') | (x == 'PyTorch') | (x == 'TensorFlowKerasPyTorch') | (x == 'Keras') | (x == 'TensorFlowPyTorch') | (x == 'KerasPyTorch') | (x == 'TensorFlowKerasPyTorchFast.ai') | (x == 'TensorFlowKerasPyTorchFastai') | (x == 'PyTorchFast.ai') | (x == 'TensorFlowKerasFastai') | (x == 'Fast.ai') | (x == 'TensorFlowKerasFast.ai') | (x == 'KerasPyTorchFast.ai') | (x == 'TensorFlowPyTorchFast.ai') | (x == 'Fastai') | (x == 'PyTorchFastai') | (x == 'KerasFast.ai') | (x == 'TensorFlowPyTorchFastai') | (x == 'TensorFlowFast.ai ') | (x == 'KerasPyTorchFastai') | (x == 'TensorFlowFastai') | (x == 'KerasFastai') | (x == 'TensorFlowFast.ai') | (x == 'Xgboost') | (x == 'XgboostLightGBM') | (x == 'Xgboostlightgbm') | (x == 'Xgboostlightgbmcatboost') | (x == 'XgboostLightGBMCatBoost') | (x == 'LightGBM') | (x == 'Xgboostcatboost') | (x == 'XgboostCatBoost') | (x == 'lightgbm') | (x == 'CatBoost') | (x == 'catboost') | (x == 'LightGBMCatBoost') | (x == 'lightgbmcatboost') | (x == 'TensorFlowKerasXgboost') | (x == 'TensorFlowKerasPyTorchXgboost') | (x == 'TensorFlowKerasXgboostLightGBM') | (x == 'TensorFlowXgboost') | (x == 'KerasXgboost') | (x == 'TensorFlowKerasPyTorchXgboostLightGBM') | (x == 'TensorFlowKerasXgboostlightgbm') | (x == 'PyTorchXgboost') | (x == 'TensorFlowKerasXgboostlightgbmcatboost') | (x == 'KerasXgboostLightGBM') | (x == 'KerasPyTorchXgboost') | (x == 'KerasPyTorchXgboostLightGBM') | (x == 'PyTorchLightGBM') | (x == 'TensorFlowKerasPyTorchFast.aiXgboostLightGBM ') | (x == 'TensorFlowKerasPyTorchXgboostLightGBMCatBoost') | (x == 'PyTorchXgboostLightGBM') | (x == 'TensorFlowKerasXgboostLightGBMCatBoost') | (x == 'TensorFlowKerasPyTorchXgboostlightgbm') | (x == 'TensorFlowPyTorchXgboost') | (x == 'TensorFlowKerasPyTorchXgboostlightgbmcatboost'):
return 'Deep Learning & Boosting modules'
else:
return x
# Merge all survey data from all years into one dataframe and unify responses as much as possible
consolidated_data = pd.DataFrame()
year_2017 = pd.DataFrame()
year_2018 = pd.DataFrame()
year_2019 = pd.DataFrame()
year_2020 = pd.DataFrame()
#'17
year_2017['Salary'] = data_2017.CompensationAmount.apply(lambda x: consolidate_salary_2017(x))
year_2017['Country'] = data_2017.Country.apply(lambda x: consolidate_country_2017_to_2020(x))
year_2017['Age'] = data_2017.Age.drop(0).apply(lambda x: consolidate_age_2017(x))
year_2017['Job_field'] = data_2017.CurrentJobTitleSelect.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2017['Sex'] = data_2017.GenderSelect.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2017['Year'] = 2017
#'18
year_2018['Salary'] = data_2018.Q9.apply(lambda x: consolidate_salary_2018(x))
year_2018['Country'] = data_2018.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2018['Age'] = data_2018.Q2.drop(0).replace({'70-79' : '70+', '80+' : '70+'})
year_2018['Age'] = year_2018.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2018['Job_field'] = data_2018.Q6.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2018['Sex'] = data_2018.Q1.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2018['Visual_modules'] = (data_2018['Q21_Part_1'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_2'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_3'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_5'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_6'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_7'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_9'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_10'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_11'].fillna('').drop(0).str.strip(' ') + data_2018['Q21_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q21_Part_9'].fillna('').drop(0).str.strip(' '))
year_2018['Visual_modules'] = year_2018.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2018['ML_modules'] = (data_2018['Q19_Part_1'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_2'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_3'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_5'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_6'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_7'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_9'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_10'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_11'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_13'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_14'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_15'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_16'].fillna('').drop(0).str.strip(' ')
+ data_2018['Q19_Part_17'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_18'].fillna('').drop(0).str.strip(' ') + data_2018['Q19_Part_19'].fillna('').drop(0).str.strip(' '))
year_2018['ML_modules'] = year_2018.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2018['Year'] = 2018
#'19
year_2019['Salary'] = data_2019.Q10.apply(lambda x: consolidate_salary_2019_2020(x))
year_2019['Country'] = data_2019.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2019['Age'] = data_2019.Q1.drop(0)
year_2019['Age'] = year_2019.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2019['Job_field'] = data_2019.Q5.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2019['Sex'] = data_2019.Q2.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2019['Visual_modules'] = (data_2019['Q20_Part_1'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_2'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_3'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q20_Part_5'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_6'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_7'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q20_Part_9'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_10'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_11'].fillna('').drop(0).str.strip(' ') + data_2019['Q20_Part_12'].fillna('').drop(0).str.strip(' '))
year_2019['Visual_modules'] = year_2019.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2019['ML_modules'] = (data_2019['Q28_Part_1'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_2'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_3'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q28_Part_5'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_6'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_7'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2019['Q28_Part_9'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_10'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_11'].fillna('').drop(0).str.strip(' ') + data_2019['Q28_Part_12'].fillna('').drop(0).str.strip(' '))
year_2019['ML_modules'] = year_2019.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2019['Year'] = 2019
#'20
year_2020['Salary'] = data_2020.Q24.apply(lambda x: consolidate_salary_2019_2020(x))
year_2020['Country'] = data_2020.Q3.drop(0).apply(lambda x: consolidate_country_2017_to_2020(x))
year_2020['Age'] = data_2020.Q1.drop(0)
year_2020['Age'] = year_2020.Age.apply(lambda x: consolidate_age_2018_to_2020(x))
year_2020['Job_field'] = data_2020.Q5.drop(0).apply(lambda x: consolidate_job_titles_2017_to_2020(x))
year_2020['Sex'] = data_2020.Q2.drop(0).apply(lambda x: consolidate_sex_2017_to_2020(x))
year_2020['Visual_modules'] = (data_2020['Q14_Part_1'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_2'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_3'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q14_Part_5'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_6'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_7'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q14_Part_9'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_10'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_Part_11'].fillna('').drop(0).str.strip(' ') + data_2020['Q14_OTHER'].fillna('').drop(0).str.strip(' '))
year_2020['Visual_modules'] = year_2020.Visual_modules.apply(lambda x: consolidate_vizmodules_2018_to_2020(x))
year_2020['ML_modules'] = (data_2020['Q16_Part_1'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_2'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_3'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_4'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_5'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_6'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_7'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_8'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_9'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_10'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_11'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_12'].fillna('').drop(0).str.strip(' ')
+ data_2020['Q16_Part_13'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_14'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_Part_15'].fillna('').drop(0).str.strip(' ') + data_2020['Q16_OTHER'].fillna('').drop(0).str.strip(' '))
year_2020['ML_modules'] = year_2020.ML_modules.apply(lambda x: consolidate_mlmodules_2018_to_2020(x))
year_2020['Year'] = 2020
#consolidating the dataframe
consolidated_data = year_2017
consolidated_data = consolidated_data.append(year_2018)
consolidated_data = consolidated_data.append(year_2019)
consolidated_data = consolidated_data.append(year_2020)
consolidated_data = consolidated_data.reset_index(drop=True)
#making subplots for top 10 countries on total number of respondents
sub_2017 = year_2017['Country'].value_counts().sort_values(ascending = False).head(10).reset_index().round(1)
sub_2018 = year_2018['Country'].value_counts().sort_values(ascending = False).head(10).reset_index().round(1)
sub_2019 = year_2019['Country'].value_counts().sort_values(ascending = False).head(10).reset_index().round(1)
sub_2020 = year_2020['Country'].value_counts().sort_values(ascending = False).head(10).reset_index().round(1)
fig = make_subplots(
rows=2, cols=2,
subplot_titles=("2017", "2018", "2019", "2020"))
fig.add_trace(go.Bar(x=sub_2017['index'], y=sub_2017['Country']),
row=1, col=1)
fig.add_trace(go.Bar(x=sub_2018['index'], y=sub_2018['Country']),
row=1, col=2)
fig.add_trace(go.Bar(x=sub_2019['index'], y=sub_2019['Country']),
row=2, col=1)
fig.add_trace(go.Bar(x=sub_2020['index'], y=sub_2020['Country']),
row=2, col=2)
fig.update_layout(
title_text="Top 10 countries 2017-20 (total number of respondents)", title_x=0.5)
fig.update_layout(showlegend=False)
fig.show()
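# per-year respondent counts and within-year percentages by country, used below for the animated India-vs-USA comparison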
animate_countries = consolidated_data.groupby(['Year', 'Country']).size().reset_index()
animate_countries['Percentage'] = consolidated_data.groupby(['Year', 'Country']).size().groupby(level=0).apply(lambda x: 100 * x / float(x.sum())).values.round(2)
animate_countries.columns = ['Year', 'Country', 'Counts', 'Percentage']
animate_countries = animate_countries.loc[(animate_countries['Country'] == 'USA') | (animate_countries['Country'] == 'India')]
fig = px.bar(animate_countries, x='Country', y='Percentage', color='Country', animation_frame='Year', range_y=[0, 50], text=animate_countries['Percentage'].apply(lambda x: '{0:1.2f}%'.format(x)))
fig.layout.updatemenus[0].buttons[0].args[1]['frame']['duration'] = 2200
fig.update_layout(title_text='India vs USA (2017-2020)', title_x=0.5)
fig.update_yaxes(title='Portion of Respondents (%)')
fig.update_xaxes(title='')
fig.update_traces(marker_line_color='rgb(31, 27, 25)', marker_line_width=1.2, opacity=1)
fig.update_layout(showlegend=False)
fig.show() | code |
128034461/cell_13 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show() | code |
128034461/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
data.describe() | code |
128034461/cell_40 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data = data.drop(['list_mots'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(data.Tweet_freq, data.Sentiment_encoded, test_size=0.3, random_state=0)
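# baseline classifier: TF-IDF features feeding a linear SVM, wrapped in a single fit/predict pipeline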
pipe_svm = Pipeline([('tfidf', TfidfVectorizer()), ('model', LinearSVC())])
model_svm = pipe_svm.fit(x_train, y_train)
prediction_svm = model_svm.predict(x_test)
# Confusion matrix displayed as a heatmap
print('Matrice de confusion en forme de heatmap')
cm_svm = confusion_matrix(y_test, prediction_svm)
noms_des_classes = data.Sentiment.unique()
plt.figure(figsize=(10,7))
ax = sns.heatmap(cm_svm ,annot=True,linewidths=1, fmt = 'd', xticklabels=noms_des_classes, yticklabels=noms_des_classes)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",rotation_mode="anchor")
plt.show()
# Classification report
print('Rapport de classification')
print(classification_report(y_test, prediction_svm, target_names=noms_des_classes))
print('Matrice de confusion en forme de heatmap')
cm_svm = confusion_matrix(y_test, prediction_svm)
noms_des_classes = data.Sentiment.unique()
plt.figure(figsize=(10, 7))
ax = sns.heatmap(cm_svm, annot=True, linewidths=1, fmt='d', xticklabels=noms_des_classes, yticklabels=noms_des_classes)
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
plt.show()
print('Rapport de classification')
print(classification_report(y_test, prediction_svm, target_names=noms_des_classes)) | code |
128034461/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples') | code |
128034461/cell_18 | [
"text_html_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go  # needed for go.Figure / go.Funnelarea below
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figure(go.Funnelarea(text=temp.Sentiment, values=temp.Tweet, title={'position': 'top center', 'text': 'Distribution des sentiments'}))
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data['list_mots'] = data['Tweet'].apply(lambda x: str(x).split())
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Blues')
fig = px.bar(temp, x="count", y="Common_words", title='Commmon Words in Selected Text', orientation='h',
width=700, height=700,color='Common_words')
fig.show()
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp = temp.iloc[1:, :]
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Purples')
fig = px.bar(temp, x='count', y='Common_words', title='Common Words in Selected Text', orientation='h', width=700, height=700, color='Common_words')
fig.show() | code |
128034461/cell_28 | [
"text_html_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords, wordnet
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
STOPWORDS = set(stopwords.words('english'))
def remove_stopword_punctuation(x):
return [y for y in x if y not in STOPWORDS and len(y) > 2]
data['list_mots'] = data['list_mots'].apply(lambda x: remove_stopword_punctuation(x))
data = data.drop(['list_mots'], axis=1)
def remove_punct(text):
"""fonction pour supprimer la ponctuation"""
text = ''.join([char for char in text if char not in string.punctuation])
text = re.sub('[0-9]+', '', text)
return text
def remove_stopwords(text, STOPWORDS):
"""fonction pour supprimer les stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
def stem_words(text, stemmer):
"""fonction pour extraire les racines des mots"""
return ' '.join([stemmer.stem(word) for word in text.split()])
def remove_freqwords(text, FREQWORDS):
"""fonction pour supprimer les mots fréquents"""
return ' '.join([word for word in str(text).split() if word not in FREQWORDS])
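# count word frequencies over the stemmed corpus and inspect the ten most common terms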
count = Counter()
for text in data['Tweet_stemmed'].values:
for word in text.split():
count[word] += 1
count.most_common(10) | code |
128034461/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
data.head() | code |
128034461/cell_15 | [
"image_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go  # needed for go.Figure / go.Funnelarea below
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figure(go.Funnelarea(text=temp.Sentiment, values=temp.Tweet, title={'position': 'top center', 'text': 'Distribution des sentiments'}))
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data['list_mots'] = data['Tweet'].apply(lambda x: str(x).split())
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Blues')
fig = px.bar(temp, x='count', y='Common_words', title='Common Words in Selected Text', orientation='h', width=700, height=700, color='Common_words')
fig.show() | code |
128034461/cell_17 | [
"text_html_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objects as go  # needed for go.Figure / go.Funnelarea below
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figure(go.Funnelarea(text=temp.Sentiment, values=temp.Tweet, title={'position': 'top center', 'text': 'Distribution des sentiments'}))
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data['list_mots'] = data['Tweet'].apply(lambda x: str(x).split())
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Blues')
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp = temp.iloc[1:, :]
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Purples') | code |
128034461/cell_31 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data = data.drop(['list_mots'], axis=1)
data.head() | code |
128034461/cell_14 | [
"text_html_output_2.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objects as go  # needed for go.Figure / go.Funnelarea below
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figure(go.Funnelarea(text=temp.Sentiment, values=temp.Tweet, title={'position': 'top center', 'text': 'Distribution des sentiments'}))
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data['list_mots'] = data['Tweet'].apply(lambda x: str(x).split())
top = Counter([item for sublist in data['list_mots'] for item in sublist])
temp = pd.DataFrame(top.most_common(20))
temp.columns = ['Common_words', 'count']
temp.style.background_gradient(cmap='Blues') | code |
128034461/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.graph_objects as go  # needed for go.Figure below
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figure(go.Funnelarea(text=temp.Sentiment, values=temp.Tweet, title={'position': 'top center', 'text': 'Sentiment distribution'}))
fig.show() | code |
128034461/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
text = ' '.join([x for x in data.Tweet])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
data = data.drop(['list_mots'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(data.Tweet_freq, data.Sentiment_encoded, test_size=0.3, random_state=0)
pipe_svm = Pipeline([('tfidf', TfidfVectorizer()), ('model', LinearSVC())])
model_svm = pipe_svm.fit(x_train, y_train)
prediction_svm = model_svm.predict(x_test)
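# Note: Tweet_freq and Sentiment_encoded are produced by preprocessing/label-encoding steps
# that are not shown in this snippet. Because the TF-IDF vectoriser and the LinearSVC sit in
# one Pipeline, the fitted model can score raw text directly; an illustrative call
# (hypothetical headline, label decoding depends on how Sentiment_encoded was built) is:
# model_svm.predict(['Operating profit rose clearly above market expectations'])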
print('Confusion matrix as a heatmap')
cm_svm = confusion_matrix(y_test, prediction_svm)
noms_des_classes = data.Sentiment.unique()
plt.figure(figsize=(10, 7))
ax = sns.heatmap(cm_svm, annot=True, linewidths=1, fmt='d', xticklabels=noms_des_classes, yticklabels=noms_des_classes)
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
plt.show()
print('Classification report')
print(classification_report(y_test, prediction_svm, target_names=noms_des_classes)) | code |
1008769/cell_13 | [
"text_plain_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import newaxis
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time #helper libraries
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape
wltw_stock_prices = wltw.close.values.astype('float32')
wltw_stock_prices = wltw_stock_prices.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
wltw_stock_prices = scaler.fit_transform(wltw_stock_prices)
def create_dataset(dataset, look_back=1):
dataX, dataY = ([], [])
for i in range(len(dataset) - look_back - 1):
a = dataset[i:i + look_back, 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return (np.array(dataX), np.array(dataY))
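# Tiny illustration of the windowing above: with look_back=1, a series [[1.], [2.], [3.], [4.]]
# yields X = [[1.], [2.]] and y = [2., 3.]: each window of look_back values predicts the next one.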
# The supervised framing below was missing from this snippet: split the scaled series
# chronologically (same 0.67 split as the preparation step) and build (X, y) windows
# with create_dataset before reshaping for the LSTM.
train_size = int(len(wltw_stock_prices) * 0.67)
train, test = (wltw_stock_prices[0:train_size, :], wltw_stock_prices[train_size:len(wltw_stock_prices), :])
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=1))
model.add(Activation('linear'))
start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
model.fit(trainX, trainY, batch_size=128, nb_epoch=10, validation_split=0.1)
def plot_results_multiple(predicted_data, true_data):
plt.plot(true_data)
plt.plot(predicted_data)
plt.show()
def predict_sequences_multiple(model, firstValue, length):
prediction_seqs = []
curr_frame = firstValue
for i in range(length):
predicted = []
predicted.append(model.predict(curr_frame[newaxis, :, :])[0, 0])
curr_frame = curr_frame[0:]
curr_frame = np.insert(curr_frame[0:], i + 1, predicted[-1], axis=0)
prediction_seqs.append(predicted[-1])
return prediction_seqs
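# predict_sequences_multiple rolls the model forward: each prediction is inserted back into the
# input frame and used for the next step, so the whole test horizon is generated from the first
# test window alone rather than from the true observed values.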
predictions = predict_sequences_multiple(model, testX[0], len(testX))
plot_results_multiple(predictions, testY) | code |
1008769/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape | code |
1008769/cell_6 | [
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape
wltw_stock_prices = wltw.close.values.astype('float32')
wltw_stock_prices = wltw_stock_prices.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
wltw_stock_prices = scaler.fit_transform(wltw_stock_prices)
train_size = int(len(wltw_stock_prices) * 0.67)
test_size = len(wltw_stock_prices) - train_size
train, test = (wltw_stock_prices[0:train_size, :], wltw_stock_prices[train_size:len(wltw_stock_prices), :])
print(len(train), len(test)) | code |
1008769/cell_2 | [
"image_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.cross_validation import train_test_split
import time
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from numpy import newaxis | code |
1008769/cell_11 | [
"text_html_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import numpy as np # linear algebra
import time #helper libraries
def create_dataset(dataset, look_back=1):
dataX, dataY = ([], [])
for i in range(len(dataset) - look_back - 1):
a = dataset[i:i + look_back, 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return (np.array(dataX), np.array(dataY))
# 'train' and 'test' are the chronological split of the scaled series from the
# data-preparation cell (not repeated in this snippet); build the (X, y) windows
# with create_dataset before reshaping for the LSTM.
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=1))
model.add(Activation('linear'))
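# Shape flow (Keras 1.x argument names): input (batch, 1, 1) -> LSTM(50, return_sequences=True)
# gives (batch, 1, 50) -> LSTM(100, return_sequences=False) collapses the time axis to (batch, 100)
# -> Dense(1) with a linear activation emits one scaled closing price per sample.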
start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
model.fit(trainX, trainY, batch_size=128, nb_epoch=10, validation_split=0.1) | code |
1008769/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset | code |
1008769/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import time #helper libraries
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=1))
model.add(Activation('linear'))
start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
print('compilation time : ', time.time() - start) | code |
1008769/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape
wltw_stock_prices = wltw.close.values.astype('float32')
plt.plot(wltw_stock_prices)
plt.show()
wltw_stock_prices = wltw_stock_prices.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
wltw_stock_prices = scaler.fit_transform(wltw_stock_prices) | code |
18154187/cell_21 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
Europa_df = fi_df[fi_df['Continent'] == 'Europe']
pkb_df = pd.read_csv('../input/gdpset/GDP.csv', delimiter=';')
pkb_df = pkb_df.loc[:, ['Country', 'GDP_2016', 'GDP_2017', 'GDP_PC_2018']]
pkb_sorted = pkb_df.sort_values(['GDP_2017'], ascending=[True])
pkb_sorted.set_index('Country')
pkb_eu = pd.merge(Europa_df, pkb_df, on='Country', how='left')
pkb_eu_cl = pkb_eu.dropna(axis=0, subset=['GDP_2017'])
pkb_eu_cl['GDP_2017'] = pkb_eu_cl['GDP_2017'].astype('int64')
pkb_eu_cl['GDP_2016'] = pkb_eu_cl['GDP_2016'].astype('int64')
pkb_eu_cl['GDP_PC_2018'] = pkb_eu_cl['GDP_PC_2018'].astype('int64')
pkb_eu_cl = pkb_eu_cl.loc[:, ['Country', 'Total_employee_estimate', 'Current_employee_estimate', 'GDP_2017', 'GDP_2016', 'GDP_PC_2018']]
X = pkb_eu_cl.loc[:, ['Total_employee_estimate', 'GDP_2017']]
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
kmeans
score = [kmeans[i].fit(X).score(X) for i in range(len(kmeans))]
score
k = 5
kmeans = KMeans(n_clusters=k)
x_kmeans = kmeans.fit(X)
labels = kmeans.labels_
labels[::20]
centroids = kmeans.cluster_centers_
centroids
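# Note: the two features are on very different numeric scales (employee counts vs. GDP), so
# Euclidean distances, and therefore the clusters, are dominated by the larger-scaled feature.
# A common refinement, sketched here only as an illustration and not used in this notebook:
# from sklearn.preprocessing import StandardScaler
# X_scaled = StandardScaler().fit_transform(X)
# kmeans_scaled = KMeans(n_clusters=k).fit(X_scaled)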
for i in range(k):
ds = X[labels == i]
plt.plot(ds.iloc[:, 0], ds.iloc[:, 1], 'o')
lines = plt.plot(centroids[i, 0], centroids[i, 1], 'ro')
plt.setp(lines, ms=15.0)
plt.setp(lines, mew=2.0)
plt.xlabel('Number of employees')
plt.ylabel('GDP_2017')
plt.title('5 Cluster K-Means')
plt.show() | code |
18154187/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
Europa_df = fi_df[fi_df['Continent'] == 'Europe']
Europa_df.head(2)
Europa_df.info() | code |
18154187/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
comp_nonan_df.info()
comp_nonan_df.head()
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df | code |
18154187/cell_23 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
Europa_df = fi_df[fi_df['Continent'] == 'Europe']
pkb_df = pd.read_csv('../input/gdpset/GDP.csv', delimiter=';')
pkb_df = pkb_df.loc[:, ['Country', 'GDP_2016', 'GDP_2017', 'GDP_PC_2018']]
pkb_sorted = pkb_df.sort_values(['GDP_2017'], ascending=[True])
pkb_sorted.set_index('Country')
pkb_eu = pd.merge(Europa_df, pkb_df, on='Country', how='left')
pkb_eu_cl = pkb_eu.dropna(axis=0, subset=['GDP_2017'])
pkb_eu_cl['GDP_2017'] = pkb_eu_cl['GDP_2017'].astype('int64')
pkb_eu_cl['GDP_2016'] = pkb_eu_cl['GDP_2016'].astype('int64')
pkb_eu_cl['GDP_PC_2018'] = pkb_eu_cl['GDP_PC_2018'].astype('int64')
pkb_eu_cl = pkb_eu_cl.loc[:, ['Country', 'Total_employee_estimate', 'Current_employee_estimate', 'GDP_2017', 'GDP_2016', 'GDP_PC_2018']]
X = pkb_eu_cl.loc[:, ['Total_employee_estimate', 'GDP_2017']]
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
kmeans
score = [kmeans[i].fit(X).score(X) for i in range(len(kmeans))]
score
# k-means algorithm with 5 clusters
k = 5
kmeans = KMeans(n_clusters=k)
x_kmeans=kmeans.fit(X)
labels = kmeans.labels_
labels[::20]
centroids = kmeans.cluster_centers_
centroids
#plot
for i in range(k):
# select only data observations with cluster label == i
ds = X[labels == i]
# plot the data observations
plt.plot(ds.iloc[:,0],ds.iloc[:,1],'o')
# plot the centroids
lines = plt.plot(centroids[i,0],centroids[i,1],'ro')
# make the centroid x's bigger
plt.setp(lines,ms=15.0)
plt.setp(lines,mew=2.0)
plt.xlabel('Number of employees')
plt.ylabel('GDP_2017')
plt.title('5 Cluster K-Means')
plt.show()
predict = kmeans.predict(X)
predict_1 = predict + 1  # shift cluster labels from 0-4 to 1-5 for reporting
pkb_eu_cl['cluster'] = pd.Series(predict_1, index=pkb_eu_cl.index)
pkb_eu_cl.head()
for i in range(1, 6):
unique_countries = pkb_eu_cl[pkb_eu_cl.cluster == i].Country.unique()
print('Cluster', i)
print(unique_countries)
print() | code |
18154187/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
comp_df.rename(columns={'year founded': 'year_founded', 'size range': 'size_range', 'linkedin url': 'linkedin_url', 'current employee estimate': 'current_employee_estimate', 'total employee estimate': 'total_employee_estimate'}, inplace=True)
comp_df.columns = comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country'] = comp_df.Country.str.title()
comp_df.head() | code |
18154187/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
Europa_df = fi_df[fi_df['Continent'] == 'Europe']
pkb_df = pd.read_csv('../input/gdpset/GDP.csv', delimiter=';')
pkb_df = pkb_df.loc[:, ['Country', 'GDP_2016', 'GDP_2017', 'GDP_PC_2018']]
pkb_sorted = pkb_df.sort_values(['GDP_2017'], ascending=[True])
pkb_sorted.set_index('Country')
pkb_eu = pd.merge(Europa_df, pkb_df, on='Country', how='left')
pkb_eu_cl = pkb_eu.dropna(axis=0, subset=['GDP_2017'])
pkb_eu_cl['GDP_2017'] = pkb_eu_cl['GDP_2017'].astype('int64')
pkb_eu_cl['GDP_2016'] = pkb_eu_cl['GDP_2016'].astype('int64')
pkb_eu_cl['GDP_PC_2018'] = pkb_eu_cl['GDP_PC_2018'].astype('int64')
pkb_eu_cl = pkb_eu_cl.loc[:, ['Country', 'Total_employee_estimate', 'Current_employee_estimate', 'GDP_2017', 'GDP_2016', 'GDP_PC_2018']]
X = pkb_eu_cl.loc[:, ['Total_employee_estimate', 'GDP_2017']]
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
kmeans
score = [kmeans[i].fit(X).score(X) for i in range(len(kmeans))]
score
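# scikit-learn's KMeans.score(X) returns the negative of the within-cluster sum of squared
# distances, so the curve plotted below rises towards zero as k grows; the 'elbow' is the k
# after which adding more clusters stops improving the fit appreciably.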
pl.plot(Nc, score)
pl.xlabel('Number of Clusters')
pl.ylabel('Score')
pl.title('Elbow Curve')
pl.show() | code |
18154187/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18154187/cell_8 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_df.head()
continent_df.tail()
continent_1 = continent_df[['Continent', 'Country']]
continent_1.head()
print(any(continent_1['Country'].duplicated()))
continent_1.head() | code |
18154187/cell_15 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
pkb_df = pd.read_csv('../input/gdpset/GDP.csv', delimiter=';')
pkb_df.info()
pkb_df.head()
pkb_df = pkb_df.loc[:, ['Country', 'GDP_2016', 'GDP_2017', 'GDP_PC_2018']]
pkb_sorted = pkb_df.sort_values(['GDP_2017'], ascending=[True])
pkb_sorted.set_index('Country')
pkb_sorted.head() | code |
18154187/cell_17 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
continent_2 = pd.concat([continent_1, df_5], sort=True)
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
Europa_df = fi_df[fi_df['Continent'] == 'Europe']
pkb_df = pd.read_csv('../input/gdpset/GDP.csv', delimiter=';')
pkb_df = pkb_df.loc[:, ['Country', 'GDP_2016', 'GDP_2017', 'GDP_PC_2018']]
pkb_sorted = pkb_df.sort_values(['GDP_2017'], ascending=[True])
pkb_sorted.set_index('Country')
pkb_eu = pd.merge(Europa_df, pkb_df, on='Country', how='left')
pkb_eu.info()
pkb_eu_cl = pkb_eu.dropna(axis=0, subset=['GDP_2017'])
pkb_eu_cl['GDP_2017'] = pkb_eu_cl['GDP_2017'].astype('int64')
pkb_eu_cl['GDP_2016'] = pkb_eu_cl['GDP_2016'].astype('int64')
pkb_eu_cl['GDP_PC_2018'] = pkb_eu_cl['GDP_PC_2018'].astype('int64')
pkb_eu_cl = pkb_eu_cl.loc[:, ['Country', 'Total_employee_estimate', 'Current_employee_estimate', 'GDP_2017', 'GDP_2016', 'GDP_PC_2018']]
pkb_eu_cl.info()
pkb_eu_cl.head() | code |
18154187/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
# Check the dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
# Rename the columns and capitalise the first letter of each column name.
comp_df.rename(columns={'year founded':'year_founded','size range':'size_range','linkedin url':'linkedin_url','current employee estimate':'current_employee_estimate','total employee estimate':'total_employee_estimate'}, inplace=True)
comp_df.columns=comp_df.columns.str.capitalize()
print(comp_df.columns)
comp_df['Country']=comp_df.Country.str.title()
comp_df.head()
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_1 = continent_df[['Continent', 'Country']]
comp_nonan_df = comp_df.dropna(axis=0, subset=['Country'])
comp_nonan_df.shape
marge_df = pd.merge(comp_nonan_df, continent_1, on='Country', how='left')
marge_df
contin_null = marge_df[['Continent', 'Country']]
contin_null
df_null = contin_null.loc[contin_null['Continent'].isnull()]
df_null
df_null['Country'].unique()
dict_null = [{'United States': 'North America', 'United Kingdom': 'Europe', 'Czechia': 'Europe', 'South Korea': 'Asia', 'Taiwan': 'Asia', 'Venezuela': 'South America', 'Hong Kong': 'Asia', 'Russia': 'Europe', 'Iran': 'Asia', 'Vietnam': 'Asia', 'Palestine': 'Asia', 'Trinidad And Tobago': 'North America', 'Macau': 'Asia', 'Syria': 'Asia', 'Tanzania': 'Africa', 'Isle Of Man': 'Europe', 'Brunei': 'Asia', 'Micronesia': 'Oceania', 'Côte D’Ivoire': 'Africa', 'Macedonia': 'Europe', 'Bolivia': 'South America', 'Moldova': 'Europe', 'Bosnia And Herzegovina': 'Europe', 'Democratic Republic Of The Congo': 'Africa', 'Netherlands Antilles': 'Europe', 'Laos': 'Asia', 'Saint Vincent And The Grenadines': 'North America', 'Faroe Islands': 'Europe', 'Saint Kitts And Nevis': 'North America', 'Kosovo': 'Europe', 'Cape Verde': 'Africa', 'Svalbard And Jan Mayen': 'Europe', 'Turks And Caicos Islands': 'North America', 'São Tomé And Príncipe': 'Africa', 'Caribbean Netherlands': 'North America', 'Sint Maarten': 'North America', 'North Korea': 'Asia', 'Antigua And Barbuda': 'North America', 'Republic Of The Congo': 'Africa', 'Saint Martin': 'North America', 'U.S. Virgin Islands': 'North America', 'Saint Pierre And Miquelon': 'North America', 'Saint Barthélemy': 'North America'}]
df_5 = pd.DataFrame(dict_null).transpose()
df_5 = df_5.reset_index()
df_5.columns = ['Country', 'Continent']
df_5.info()
continent_2 = pd.concat([continent_1, df_5], sort=True)
continent_2.info()
continent_2.head()
fi_df = pd.merge(comp_nonan_df, continent_2, on='Country', how='left')
fi_df.info()
fi_df.head()
fi_df['Continent'].isnull().any()
fi_df.drop(['Unnamed: 0', 'Domain', 'Locality', 'Linkedin_url'], axis=1, inplace=True)
fi_df.info()
fi_df.head() | code |
2004768/cell_8 | [
"text_plain_output_1.png"
] | import lightgbm as lgb
import numpy as np
import pandas as pd
MAX_PRED = 1000
MAX_ROUNDS = 2000
indir = '../input/preparing-data-ii/'
indir2 = '../input/favorita-grocery-sales-forecasting/'
X_test = pd.read_csv(indir + 'X_test.csv')
X_val = pd.read_csv(indir + 'X_val.csv')
X_train = pd.read_csv(indir + 'X_train.csv')
y_train = np.array(pd.read_csv(indir + 'y_train.csv'))
y_val = np.array(pd.read_csv(indir + 'y_val.csv'))
stores_items = pd.read_csv(indir + 'stores_items.csv', index_col=['store_nbr', 'item_nbr'])
test_ids = pd.read_csv(indir + 'test_ids.csv', parse_dates=['date']).set_index(['store_nbr', 'item_nbr', 'date'])
items = pd.read_csv(indir2 + 'items.csv').set_index('item_nbr')
items = items.reindex(stores_items.index.get_level_values(1))
params = {'num_leaves': 31, 'objective': 'regression', 'min_data_in_leaf': 200, 'learning_rate': 0.025, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 2, 'metric': 'l2', 'num_threads': 4}
val_pred = []
test_pred = []
cate_vars = []
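# Direct multi-horizon strategy: the loop below trains one LightGBM regressor per forecast day
# (16 models for the 16-day horizon), each predicting sales i days ahead. Sample weights are
# perishable * 0.25 + 1, i.e. 1.25 for perishable items and 1.0 otherwise, matching the extra
# weight the competition metric gives perishables.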
for i in range(16):
print('=' * 50)
print('Step %d' % (i + 1))
print('=' * 50)
dtrain = lgb.Dataset(X_train, label=y_train[:, i], categorical_feature=cate_vars, weight=pd.concat([items['perishable']] * 6) * 0.25 + 1)
dval = lgb.Dataset(X_val, label=y_val[:, i], reference=dtrain, weight=items['perishable'] * 0.25 + 1, categorical_feature=cate_vars)
bst = lgb.train(params, dtrain, num_boost_round=MAX_ROUNDS, valid_sets=[dtrain, dval], early_stopping_rounds=125, verbose_eval=500)
print('\n'.join(('%s: %.2f' % x for x in sorted(zip(X_train.columns, bst.feature_importance('gain')), key=lambda x: x[1], reverse=True))))
val_pred.append(bst.predict(X_val, num_iteration=bst.best_iteration or MAX_ROUNDS))
test_pred.append(bst.predict(X_test, num_iteration=bst.best_iteration or MAX_ROUNDS)) | code |
2004768/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from subprocess import check_output
from datetime import date, timedelta
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2004768/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import numpy as np
import pandas as pd
MAX_PRED = 1000
MAX_ROUNDS = 2000
indir = '../input/preparing-data-ii/'
indir2 = '../input/favorita-grocery-sales-forecasting/'
X_test = pd.read_csv(indir + 'X_test.csv')
X_val = pd.read_csv(indir + 'X_val.csv')
X_train = pd.read_csv(indir + 'X_train.csv')
y_train = np.array(pd.read_csv(indir + 'y_train.csv'))
y_val = np.array(pd.read_csv(indir + 'y_val.csv'))
stores_items = pd.read_csv(indir + 'stores_items.csv', index_col=['store_nbr', 'item_nbr'])
test_ids = pd.read_csv(indir + 'test_ids.csv', parse_dates=['date']).set_index(['store_nbr', 'item_nbr', 'date'])
items = pd.read_csv(indir2 + 'items.csv').set_index('item_nbr')
items = items.reindex(stores_items.index.get_level_values(1))
params = {'num_leaves': 31, 'objective': 'regression', 'min_data_in_leaf': 200, 'learning_rate': 0.025, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 2, 'metric': 'l2', 'num_threads': 4}
val_pred = []
test_pred = []
cate_vars = []
for i in range(16):
dtrain = lgb.Dataset(X_train, label=y_train[:, i], categorical_feature=cate_vars, weight=pd.concat([items['perishable']] * 6) * 0.25 + 1)
dval = lgb.Dataset(X_val, label=y_val[:, i], reference=dtrain, weight=items['perishable'] * 0.25 + 1, categorical_feature=cate_vars)
bst = lgb.train(params, dtrain, num_boost_round=MAX_ROUNDS, valid_sets=[dtrain, dval], early_stopping_rounds=125, verbose_eval=500)
val_pred.append(bst.predict(X_val, num_iteration=bst.best_iteration or MAX_ROUNDS))
test_pred.append(bst.predict(X_test, num_iteration=bst.best_iteration or MAX_ROUNDS))
n_public = 5
weights = pd.concat([items['perishable']]) * 0.25 + 1
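# n_public = 5 splits the 16 validation days the way the leaderboard does: the first 5 days
# approximate the public score and the remaining 11 the private score, with the same 1.25
# weight for perishable items as during training.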
print('Unweighted validation mse: ', mean_squared_error(y_val, np.array(val_pred).transpose()))
print('Full validation mse: ', mean_squared_error(y_val, np.array(val_pred).transpose(), sample_weight=weights))
print("'Public' validation mse: ", mean_squared_error(y_val[:, :n_public], np.array(val_pred).transpose()[:, :n_public], sample_weight=weights))
print("'Private' validation mse: ", mean_squared_error(y_val[:, n_public:], np.array(val_pred).transpose()[:, n_public:], sample_weight=weights)) | code |
330380/cell_13 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
# 'Title' is used as a pivot index below but is never created in this snippet; a common
# derivation (assumed here, not taken verbatim from the original notebook) extracts it from Name:
combined['Title'] = combined['Name'].str.extract(r' ([A-Za-z]+)\.', expand=False)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
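# age_guesser imputes a missing age by drawing a uniform random integer within one standard
# deviation of the mean age for the passenger's (Title, Sex, Pclass) group. np.random.randint
# requires low < high, so a group whose std is 0 or NaN would raise here.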
unknown_age = combined['Age'].isnull()
people_w_unknown_age = combined.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = combined['Age'].notnull()
people_w_known_age = combined.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
combined['new_age'] = pd.concat((people_w_known_age['Age'], people_w_unknown_age['Age']))
combined['Embarked'].fillna('S', inplace=True)
combined['Fare'].fillna(value=combined['Fare'].mean(), inplace=True)
combined['kid'] = 0
combined.loc[combined.new_age <= 12, 'kid'] = 1
combined['parent'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age >= 18), 'parent'] = 1
combined['child'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age < 18), 'child'] = 1
combined['family'] = combined['SibSp'] + combined['Parch']
combined.loc[combined.family > 0, 'family'] = 1
combined['male'] = (~combined['Sex'].str.contains('fe')).astype(int)
not_needed = ['Age', 'Cabin', 'Name', 'Sex', 'Ticket', 'Parch', 'SibSp']
combined.drop(not_needed, axis=1, inplace=True)
categorical = ['Embarked', 'Title', 'Pclass']
for column in categorical:
dummy = pd.get_dummies(combined[column], prefix=column).astype(int)
combined = combined.join(dummy)
combined.drop(column, axis=1, inplace=True)
combined.head() | code |
330380/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std | code |
330380/cell_6 | [
"image_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
combined.info() | code |
330380/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = combined['Age'].isnull()
people_w_unknown_age = combined.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = combined['Age'].notnull()
people_w_known_age = combined.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
combined['new_age'] = pd.concat((people_w_known_age['Age'], people_w_unknown_age['Age']))
combined['Embarked'].fillna('S', inplace=True)
combined['Fare'].fillna(value=combined['Fare'].mean(), inplace=True)
combined['kid'] = 0
combined.loc[combined.new_age <= 12, 'kid'] = 1
combined['parent'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age >= 18), 'parent'] = 1
combined['child'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age < 18), 'child'] = 1
combined.tail(5)
combined['family'] = combined['SibSp'] + combined['Parch']
combined.loc[combined.family > 0, 'family'] = 1
combined['male'] = (~combined['Sex'].str.contains('fe')).astype(int)
combined.info() | code |
330380/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean | code |
330380/cell_15 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = combined['Age'].isnull()
people_w_unknown_age = combined.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = combined['Age'].notnull()
people_w_known_age = combined.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
combined['new_age'] = pd.concat((people_w_known_age['Age'], people_w_unknown_age['Age']))
combined['Embarked'].fillna('S', inplace=True)
combined['Fare'].fillna(value=combined['Fare'].mean(), inplace=True)
combined['kid'] = 0
combined.loc[combined.new_age <= 12, 'kid'] = 1
combined['parent'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age >= 18), 'parent'] = 1
combined['child'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age < 18), 'child'] = 1
combined['family'] = combined['SibSp'] + combined['Parch']
combined.loc[combined.family > 0, 'family'] = 1
combined['male'] = (~combined['Sex'].str.contains('fe')).astype(int)
not_needed = ['Age', 'Cabin', 'Name', 'Sex', 'Ticket', 'Parch', 'SibSp']
combined.drop(not_needed, axis=1, inplace=True)
categorical = ['Embarked', 'Title', 'Pclass']
for column in categorical:
dummy = pd.get_dummies(combined[column], prefix=column).astype(int)
combined = combined.join(dummy)
combined.drop(column, axis=1, inplace=True)
df = combined.loc[:len(titanic_train), :]
df_test = combined.loc[len(titanic_train) + 1:, :].copy()
df_test.drop('Survived', axis=1, inplace=True)
(df.shape, df_test.shape)
corr_mat = np.corrcoef(df.values.T)
ax = sns.heatmap(corr_mat, annot=True, fmt='.2f', xticklabels=df.columns, yticklabels=df.columns)
_ = ax.set_title('Correlation Matrix') | code |
330380/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import RobustScaler
from sklearn.feature_selection import RFECV, RFE
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.decomposition import KernelPCA
from sklearn.grid_search import GridSearchCV
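# Note: sklearn.cross_validation and sklearn.grid_search are pre-0.20 module paths; in current
# scikit-learn, StratifiedKFold, cross_val_score and GridSearchCV all live in sklearn.model_selection.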
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, roc_auc_score, roc_curve, f1_score | code |
330380/cell_14 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = combined['Age'].isnull()
people_w_unknown_age = combined.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = combined['Age'].notnull()
people_w_known_age = combined.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
combined['new_age'] = pd.concat((people_w_known_age['Age'], people_w_unknown_age['Age']))
combined['Embarked'].fillna('S', inplace=True)
combined['Fare'].fillna(value=combined['Fare'].mean(), inplace=True)
combined['kid'] = 0
combined.loc[combined.new_age <= 12, 'kid'] = 1
combined['parent'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age >= 18), 'parent'] = 1
combined['child'] = 0
combined.loc[(combined.Parch > 0) & (combined.new_age < 18), 'child'] = 1
combined['family'] = combined['SibSp'] + combined['Parch']
combined.loc[combined.family > 0, 'family'] = 1
combined['male'] = (~combined['Sex'].str.contains('fe')).astype(int)
not_needed = ['Age', 'Cabin', 'Name', 'Sex', 'Ticket', 'Parch', 'SibSp']
combined.drop(not_needed, axis=1, inplace=True)
categorical = ['Embarked', 'Title', 'Pclass']
for column in categorical:
dummy = pd.get_dummies(combined[column], prefix=column).astype(int)
combined = combined.join(dummy)
combined.drop(column, axis=1, inplace=True)
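# The loop above one-hot encodes each categorical column: e.g. 'Embarked' with values C/Q/S
# becomes three 0/1 columns Embarked_C, Embarked_Q and Embarked_S, and the original column is
# dropped so only numeric features remain for the models that follow.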
df = combined.loc[:len(titanic_train), :]
df_test = combined.loc[len(titanic_train) + 1:, :].copy()
df_test.drop('Survived', axis=1, inplace=True)
(df.shape, df_test.shape) | code |
330380/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = combined['Age'].isnull()
people_w_unknown_age = combined.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = combined['Age'].notnull()
people_w_known_age = combined.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
combined['new_age'] = pd.concat((people_w_known_age['Age'], people_w_unknown_age['Age']))
combined.head(7) | code |
330380/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
titanic_train.info()
print('\n')
titanic_test.info() | code |
17118578/cell_13 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
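# Global z-score normalisation: the mean and std are computed over the entire dataset before
# the split below, which leaks a little information from the test epochs into the scaling.
# A stricter variant (sketch only, not what this notebook does) splits the raw signal first
# and reuses the training statistics:
# mu, sigma = X_train.mean(), X_train.std()
# X_train, X_val, X_test = ((a - mu) / sigma for a in (X_train, X_val, X_test))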
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape) | code |
17118578/cell_25 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import matplotlib.pyplot as plt
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
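    # Coarse branch: long kernels (Fs*4 samples = 4 s) for slower waveform features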
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
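    # Merge both branches and model temporal context with two stacked LSTMs before the softmax output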
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
hist.history.keys()
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc']) | code |
17118578/cell_23 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
hist.history.keys() | code |
17118578/cell_20 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
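# Per-class metrics (sensitivity, specificity, precision, accuracy, F1) computed one-vs-rest from a confusion matrix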
def evaluate_metrics(cm, classes):
cm = cm.astype(np.float32)
FP = cm.sum(axis=0) - np.diag(cm)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
PPV = TP / (TP + FP)
NPV = TN / (TN + FN)
FPR = FP / (FP + TN)
FNR = FN / (TP + FN)
FDR = FP / (TP + FP)
ACC = (TP + TN) / (TP + FP + FN + TN)
ACC_macro = np.mean(ACC)
F1 = 2 * PPV * TPR / (PPV + TPR)
F1_macro = np.mean(F1)
n_classes = len(classes)
return (ACC_macro, ACC, F1_macro, F1, TPR, TNR, PPV)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
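# Evaluate on the held-out test set: predict class probabilities and reduce them to class indices with argmax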
bs = 64
y_pred = model_2019.predict(X_test, batch_size=bs)
y_pred = np.array([np.argmax(s) for s in y_pred])
f1 = f1_score(y_test, y_pred, average='macro')
print('>>> f1 score: {}'.format(f1))
report = classification_report(y_test, y_pred)
print(report) | code |
17118578/cell_6 | [
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
print(fnames[0]) | code |
17118578/cell_26 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import matplotlib.pyplot as plt
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
def evaluate_metrics(cm, classes):
cm = cm.astype(np.float32)
FP = cm.sum(axis=0) - np.diag(cm)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
PPV = TP / (TP + FP)
NPV = TN / (TN + FN)
FPR = FP / (FP + TN)
FNR = FN / (TP + FN)
FDR = FP / (TP + FP)
ACC = (TP + TN) / (TP + FP + FN + TN)
ACC_macro = np.mean(ACC)
F1 = 2 * PPV * TPR / (PPV + TPR)
F1_macro = np.mean(F1)
n_classes = len(classes)
return (ACC_macro, ACC, F1_macro, F1, TPR, TNR, PPV)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
bs = 64
y_pred = model_2019.predict(X_test, batch_size=bs)
y_pred = np.array([np.argmax(s) for s in y_pred])
f1 = f1_score(y_test, y_pred, average='macro')
report = classification_report(y_test, y_pred)
hist.history.keys()
import os
import h5py
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import collections
import librosa
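# Visual sanity check on one recording: plot a single raw epoch and the full sequence of stage labels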
path = '../input/respiracion1/SC4061E0.npz'
data = np.load(path)
x = data['x']
y = data['y']
fig_1 = plt.figure(figsize=(12, 6))
plt.plot(x[100, ...].ravel())
plt.title('EEG Epoch')
plt.ylabel('Amplitude')
plt.xlabel('Time')
plt.show()
fig_2 = plt.figure(figsize=(12, 6))
plt.plot(y.ravel())
plt.title('Sleep Stages')
plt.ylabel('Classes')
plt.xlabel('Time')
plt.show() | code |
17118578/cell_19 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list) | code |
17118578/cell_8 | [
"text_plain_output_1.png"
] | from glob import glob
import mne
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
raw_train = mne.io.read_raw_edf(fnames[0], preload=True)
annot_train = mne.read_annotations(fnames[1])
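# Keep only the oro-nasal respiration channel and attach the sleep-stage annotations to the raw recording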
raw_train.pick_channels(['Resp oro-nasal'])
raw_train.set_annotations(annot_train, emit_warning=False)
raw_train.plot(start=20000, duration=40000, scalings='auto') | code |
17118578/cell_15 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from tensorflow import keras
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False) | code |
17118578/cell_16 | [
"text_plain_output_1.png"
] | from tensorflow import keras
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
Fs = 100
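# 2017 baseline: same dual Conv1D branches, but with bidirectional LSTMs and an optional dense side branch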
def modelo_base_2017(n_classes=5, use_sub_layer=False, summary=True):
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
if use_sub_layer:
sub_layer = Dense(1024, activation='relu', name='sub_layer')(outLayer)
outLayer = Reshape((1, int(fineShape[1] * fineShape[2] + coarseShape[1] * coarseShape[2])), name='reshape1')(outLayer)
outLayer = Bidirectional(LSTM(128, activation='relu', dropout=0.5, name='bLstm1'))(outLayer)
outLayer = Reshape((1, int(outLayer.get_shape()[1])))(outLayer)
outLayer = Bidirectional(LSTM(128, activation='relu', dropout=0.5, name='bLstm2'))(outLayer)
if use_sub_layer:
outLayer = concatenate([outLayer, sub_layer], name='merge_2')
outLayer = Dropout(rate=0.5, name='mDrop2')(outLayer)
outLayer = Dense(256, activation='relu', name='sub_layer_2')(outLayer)
outLayer = Dropout(rate=0.5, name='merge_out_sub')(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
if summary:
model.summary()
return model
model_2017 = modelo_base_2017(use_sub_layer=False, summary=True) | code |
17118578/cell_24 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import matplotlib.pyplot as plt
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
hist.history.keys()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss']) | code |
17118578/cell_22 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, normalize
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape
X = np.zeros((0, 3000, 1))
y = []
for fn in total_fs:
samples = np.load(fn)
X_data = samples['x']
X = np.concatenate((X, X_data), axis=0)
y.extend(samples['y'])
y = np.array(y)
X = (X - np.mean(X)) / np.std(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
(X_train.shape, X_test.shape, X_val.shape)
Fs = 100
def model_baseline_2019(n_classes=5, use_sub_layer=False, use_rnn=True):
"""Recurrent_Deep_Neural_Networks_for_Real-Time_Sleep
"""
inputLayer = Input(shape=(3000, 1), name='inLayer')
convFine = Conv1D(filters=64, kernel_size=int(Fs / 2), strides=int(Fs / 16), padding='same', activation='relu', name='fConv1')(inputLayer)
convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
fineShape = convFine.get_shape()
convFine = Flatten(name='fFlat1')(convFine)
convCoarse = Conv1D(filters=32, kernel_size=Fs * 4, strides=int(Fs / 2), padding='same', activation='relu', name='cConv1')(inputLayer)
convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
coarseShape = convCoarse.get_shape()
convCoarse = Flatten(name='cFlat1')(convCoarse)
mergeLayer = concatenate([convFine, convCoarse], name='merge_1')
outLayer = Dropout(rate=0.5, name='mDrop1')(mergeLayer)
outLayer = Reshape((1, outLayer.get_shape()[1]), name='reshape1')(outLayer)
outLayer = LSTM(64, return_sequences=True)(outLayer)
outLayer = LSTM(64, return_sequences=False)(outLayer)
outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
model = Model(inputLayer, outLayer)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model_2019 = model_baseline_2019(use_sub_layer=False, use_rnn=False)
def evaluate_metrics(cm, classes):
cm = cm.astype(np.float32)
FP = cm.sum(axis=0) - np.diag(cm)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
TPR = TP / (TP + FN)
TNR = TN / (TN + FP)
PPV = TP / (TP + FP)
NPV = TN / (TN + FN)
FPR = FP / (FP + TN)
FNR = FN / (TP + FN)
FDR = FP / (TP + FP)
ACC = (TP + TN) / (TP + FP + FN + TN)
ACC_macro = np.mean(ACC)
F1 = 2 * PPV * TPR / (PPV + TPR)
F1_macro = np.mean(F1)
n_classes = len(classes)
return (ACC_macro, ACC, F1_macro, F1, TPR, TNR, PPV)
checkpoint = ModelCheckpoint('model_cps', monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min': keep the checkpoint with the lowest validation loss
redonplat = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=5, verbose=2)
csv_logger = CSVLogger('log_training.csv', append=True, separator=',')
callbacks_list = [checkpoint, redonplat, csv_logger]
y_train_ = to_categorical(y_train)
y_val_ = to_categorical(y_val)
y_test_ = to_categorical(y_test)
hist = model_2019.fit(X_train, y_train_, batch_size=64, epochs=50, validation_data=(X_val, y_val_), callbacks=callbacks_list)
bs = 64
y_pred = model_2019.predict(X_test, batch_size=bs)
y_pred = np.array([np.argmax(s) for s in y_pred])
f1 = f1_score(y_test, y_pred, average='macro')
report = classification_report(y_test, y_pred)
accuracy_score(y_test, y_pred) | code |
17118578/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from glob import glob
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) for k in total_fs}
samples = total_data['../input/respiracion1/SC4001E0.npz']
samples['x'].shape | code |
17118578/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm, tqdm_notebook
import tensorflow as tf
from tensorflow import keras
from keras import optimizers, losses, activations, models
from tensorflow.keras.utils import to_categorical, normalize
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import CuDNNGRU, CuDNNLSTM
from tensorflow.keras.layers import concatenate, GlobalMaxPool1D
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
import mne
from mne.io import read_raw_edf | code |
16136832/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
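# Stratified 80/20 split on 'year' so train and test keep the same distribution of years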
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(total_suicidal, total_suicidal['year']):
strat_train_set = total_suicidal.loc[train_index]
strat_test_set = total_suicidal.loc[test_index]
suicidal_copy = strat_train_set.copy()
suicidal_copy.plot(kind='scatter', x='year', y='suicides_no') | code |
16136832/cell_26 | [
"text_html_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(total_suicidal, total_suicidal['year']):
strat_train_set = total_suicidal.loc[train_index]
strat_test_set = total_suicidal.loc[test_index]
total_suicidal['year'].value_counts() / len(total_suicidal) | code |
16136832/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16136832/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal['country'].value_counts() | code |
16136832/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
total_suicidal.hist(bins=50, figsize=(20, 15))
plt.show() | code |
16136832/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(total_suicidal, total_suicidal['year']):
strat_train_set = total_suicidal.loc[train_index]
strat_test_set = total_suicidal.loc[test_index]
suicidal_copy = strat_train_set.copy()
suicidal_copy.plot(kind='scatter', x='gdp_per_capita ($)', y='suicides_no') | code |
16136832/cell_28 | [
"image_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(total_suicidal, total_suicidal['year']):
strat_train_set = total_suicidal.loc[train_index]
strat_test_set = total_suicidal.loc[test_index]
strat_test_set['year'].value_counts() / len(strat_test_set) | code |
16136832/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.describe() | code |
16136832/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.info() | code |
16136832/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.head() | code |
90122427/cell_25 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns  # needed for the sns.scatterplot calls below
def visualize_word_counts(counts, show=True):
    wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    plt.figure(figsize=(21, 7))  # assumed figure size to match the 2100x700 cloud
    plt.imshow(cloud, interpolation='bilinear')  # render the generated cloud
    plt.axis('off')
    if show:
        plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
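# Average each country's yearly rates over the whole period covered by the data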
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # assumed definition: male-female gap in dying rates
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
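# Left-join the averaged rates onto the Natural Earth country polygons for a choropleth map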
world_merged = world.merge(country_avg, on='Country', how='left')
world_merged.plot(column='ProbDyingBoth', legend=True, figsize=(15, 8), missing_kwds={'color': 'lightgrey', 'label': 'Missing values'})
plt.title('Overall dying rates for both genders')
plt.show() | code |
90122427/cell_33 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns  # needed for the sns.scatterplot calls below
def visualize_word_counts(counts, show=True):
    wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    plt.figure(figsize=(21, 7))  # assumed figure size to match the 2100x700 cloud
    plt.imshow(cloud, interpolation='bilinear')  # render the generated cloud
    plt.axis('off')
    if show:
        plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # assumed definition: male-female gap in dying rates
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
visualize_word_counts(country_avg.set_index('Country')['ProbDyingBoth'].to_dict(), show=False)
visualize_word_counts(country_avg.set_index('Country')['SuicideBoth'].to_dict(), show=False)
top10_death = country_avg.sort_values('ProbDyingBoth', ascending=False).head(10)
top10_death
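# Yearly trajectories for the ten countries with the highest average probability of dying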
data_top10_death = data[data.Country.isin(top10_death['Country'])].iloc[::-1, :]
plt.figure(figsize=(15, 6))
sns.lineplot(data=data_top10_death, x='Year', y='ProbDyingBoth', hue='Country')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show() | code |
90122427/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
data.head() | code |
90122427/cell_29 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns  # needed for the sns.scatterplot calls below
def visualize_word_counts(counts, show=True):
    wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    plt.figure(figsize=(21, 7))  # assumed figure size to match the 2100x700 cloud
    plt.imshow(cloud, interpolation='bilinear')  # render the generated cloud
    plt.axis('off')
    if show:
        plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # assumed definition: male-female gap in dying rates
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
visualize_word_counts(country_avg.set_index('Country')['ProbDyingBoth'].to_dict(), show=False)
visualize_word_counts(country_avg.set_index('Country')['SuicideBoth'].to_dict(), show=False)
plt.title('Suicide rates for both genders')
plt.show() | code |
90122427/cell_26 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns  # needed for the sns.scatterplot calls below
def visualize_word_counts(counts, show=True):
    wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc.generate_from_frequencies(counts)
    plt.figure(figsize=(21, 7))  # assumed figure size to match the 2100x700 cloud
    plt.imshow(cloud, interpolation='bilinear')  # render the generated cloud
    plt.axis('off')
    if show:
        plt.show()
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # assumed definition: male-female gap in dying rates
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
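# choropleth of the merged WHO averages; countries without data are drawn in light grey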
world_merged.plot(column='SuicideBoth', legend=True, figsize=(15, 8), missing_kwds={'color': 'lightgrey', 'label': 'Missing values'})
plt.title('Overall suicide rates for both genders')
plt.show() | code |
90122427/cell_19 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # gap column assumed to be male rate minus female rate, inferred from its name
country_avg.sort_values(by='ProbDying_male_minus_female') | code |
90122427/cell_18 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
cloud = wc.generate_from_frequencies(counts)
plt.axis('off')
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))
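# scatter each country's male vs. female rate; the black diagonal marks equal rates for both genders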
ax[0].plot([0, 60], [0, 60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0, 100], [0, 100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show() | code |
90122427/cell_32 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
cloud = wc.generate_from_frequencies(counts)
plt.axis('off')
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # gap column assumed to be male rate minus female rate, inferred from its name
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
visualize_word_counts(country_avg.set_index('Country')['ProbDyingBoth'].to_dict(), show=False)
visualize_word_counts(country_avg.set_index('Country')['SuicideBoth'].to_dict(), show=False)
top10_death = country_avg.sort_values('ProbDyingBoth', ascending=False).head(10)
top10_death
top10_suicide = country_avg.sort_values('SuicideBoth', ascending=False).head(10)
top10_suicide | code |
90122427/cell_28 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
cloud = wc.generate_from_frequencies(counts)
plt.axis('off')
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # gap column assumed to be male rate minus female rate, inferred from its name
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
visualize_word_counts(country_avg.set_index('Country')['ProbDyingBoth'].to_dict(), show=False)
plt.title('Dying rates for both genders')
plt.show() | code |
90122427/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
countries['Germany'].head() | code |
90122427/cell_31 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
cloud = wc.generate_from_frequencies(counts)
plt.axis('off')
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}
country_avg = data.groupby('Country').mean().reset_index()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,6))
ax[0].plot([0,60],[0,60], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='ProbDyingMale', y='ProbDyingFemale', s=100, ax=ax[0])
ax[0].set_title('Dying rates by gender')
ax[1].plot([0,100],[0,100], linewidth=3, color='black')
sns.scatterplot(data=country_avg, x='SuicideMale', y='SuicideFemale', s=100, ax=ax[1])
ax[1].set_title('Suicide rates by gender')
plt.show()
country_avg['ProbDying_male_minus_female'] = country_avg['ProbDyingMale'] - country_avg['ProbDyingFemale']  # gap column assumed to be male rate minus female rate, inferred from its name
country_avg.sort_values(by='ProbDying_male_minus_female')
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
geopandas_countryname_correction = {'Bosnia and Herz.': 'Bosnia and Herzegovina', 'Brunei': 'Brunei Darussalam', 'Central African Rep.': 'Central African Republic', 'Dem. Rep. Congo': 'Democratic Republic of the Congo', 'Dominican Rep.': 'Dominican Republic', 'Eq. Guinea': 'Equatorial Guinea', 'eSwatini': 'Eswatini', 'Laos': "Lao People's Democratic Republic", 'Macedonia': 'North Macedonia', 'Moldova': 'Republic of Moldova', 'Russia': 'Russian Federation', 'United Kingdom': 'United Kingdom of Great Britain and Northern Ireland', 'Tanzania': 'United Republic of Tanzania', 'Vietnam': 'Viet Nam', 'Syria': 'Syrian Arab Republic', 'S. Sudan': 'South Sudan', 'Solomon Is.': 'Solomon Islands', 'North Korea': "Democratic People's Republic of Korea"}
world['Country'] = world['name'].map(geopandas_countryname_correction).fillna(world['name'])
world_merged = world.merge(country_avg, on='Country', how='left')
visualize_word_counts(country_avg.set_index('Country')['ProbDyingBoth'].to_dict(), show=False)
visualize_word_counts(country_avg.set_index('Country')['SuicideBoth'].to_dict(), show=False)
top10_death = country_avg.sort_values('ProbDyingBoth', ascending=False).head(10)
top10_death | code |
90122427/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5) | code |
1007495/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
data.describe(include=['O']) | code |
1007495/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
newage = data[['Age', 'Pclass', 'Sex']].dropna()
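# median ages per passenger class and sex, used below to impute missing ages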
print('Pclass 1 F = ' + str(np.median(newage.query('Pclass == 1 and Sex == "female"')['Age'])))
print('Pclass 2 F = ' + str(np.median(newage.query('Pclass == 2 and Sex == "female"')['Age'])))
print('Pclass 3 F = ' + str(np.median(newage.query('Pclass == 3 and Sex == "female"')['Age'])))
print('Pclass 1 M = ' + str(np.median(newage.query('Pclass == 1 and Sex == "male"')['Age'])))
print('Pclass 2 M = ' + str(np.median(newage.query('Pclass == 2 and Sex == "male"')['Age'])))
print('Pclass 3 M = ' + str(np.median(newage.query('Pclass == 3 and Sex == "male"')['Age'])))
data1 = data.query('Pclass == 1 and Sex == "female"')
data1['Age'] = data1['Age'].fillna(36)
data2 = data.query('Pclass == 2 and Sex == "female"')
data2['Age'] = data2['Age'].fillna(28)
data3 = data.query('Pclass == 3 and Sex == "female"')
data3['Age'] = data3['Age'].fillna(22)
data4 = data.query('Pclass == 1 and Sex == "male"')
data4['Age'] = data4['Age'].fillna(42)
data5 = data.query('Pclass == 2 and Sex == "male"')
data5['Age'] = data5['Age'].fillna(29.5)
data6 = data.query('Pclass == 3 and Sex == "male"')
data6['Age'] = data6['Age'].fillna(25)
data = pd.concat([data1, data2, data3, data4, data5, data6])
data = data.sort_values('PassengerId') | code |
1007495/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
data.describe() | code |
128021494/cell_4 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import random
data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)
file = open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r')
lines = file.readlines()
count = 0
labels = np.zeros((data.shape[0], 1))
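# encode the diagnosis read from the labels file: 'M' (malignant) -> 0, otherwise (benign) -> 1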
for line in lines:
if line[0] == 'M':
labels[count] = 0
else:
labels[count] = 1
count = count + 1
data_comb = []
for i in range(569):
data_comb.append((data[i], labels[i]))
random.shuffle(data_comb)
print(len(data_comb))
data = np.empty((569, 30))
for i in range(569):
data[i] = np.array(data_comb[i][0])
labels[i] = data_comb[i][1]
print(data[9])
print(labels[9])
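# hold out the last 25% of the shuffled samples for testing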
border = int(data.shape[0] * 0.75)
train_x = data[0:border]
train_y = labels[0:border]
test_x = data[border + 1:data.shape[0]]
test_y = labels[border + 1:data.shape[0]]
print(train_x[9])
print(train_y[9]) | code |
128021494/cell_6 | [
"text_plain_output_1.png"
] | import tensorflow as tf
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(30))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002), loss='binary_crossentropy', metrics=['accuracy', tf.keras.metrics.TruePositives(), tf.keras.metrics.FalsePositives(), tf.keras.metrics.TrueNegatives(), tf.keras.metrics.FalseNegatives()])
model.summary() | code |
128021494/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from tensorflow.keras import layers | code |
128021494/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf
data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)
file = open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r')
lines = file.readlines()
count = 0
labels = np.zeros((data.shape[0], 1))
for line in lines:
if line[0] == 'M':
labels[count] = 0
else:
labels[count] = 1
count = count + 1
data_comb = []
for i in range(569):
data_comb.append((data[i], labels[i]))
random.shuffle(data_comb)
data = np.empty((569, 30))
for i in range(569):
data[i] = np.array(data_comb[i][0])
labels[i] = data_comb[i][1]
border = int(data.shape[0] * 0.75)
train_x = data[0:border]
train_y = labels[0:border]
test_x = data[border + 1:data.shape[0]]
test_y = labels[border + 1:data.shape[0]]
from sklearn.preprocessing import StandardScaler
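# standardize features; fit the scaling statistics on the training split only to avoid leakage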
sc = StandardScaler()
train_x = sc.fit_transform(train_x)
test_x = sc.transform(test_x)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(30))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002), loss='binary_crossentropy', metrics=['accuracy', tf.keras.metrics.TruePositives(), tf.keras.metrics.FalsePositives(), tf.keras.metrics.TrueNegatives(), tf.keras.metrics.FalseNegatives()])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
batch_size = 64
print('Fit on NumPy data')
history = model.fit(train_x, train_y, batch_size=batch_size, epochs=1)
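# wrap the same training arrays in a tf.data pipeline and train for more epochs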
dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).batch(batch_size)
print('Fit on Dataset')
history = model.fit(dataset, epochs=15) | code |