path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
122264561/cell_99
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')

# clean up: parse timestamps, drop the CSV index column, remove rows with missing values
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)

# per-content aggregates: mean reaction score and most frequent sentiment
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')

# monthly reaction counts
df_react_datetime = df_react.set_index('Datetime').drop('Unnamed: 0', axis=1)
counts = df_react.groupby(pd.Grouper(key='Datetime', freq='M')).count()
freq_df = pd.DataFrame(counts['Type'])
freq_df
code
122264561/cell_55
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
df_cont_score = df_cont_score.reset_index()
df_cont_score
code
122264561/cell_76
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
df_content.groupby('Type')['Type_score'].mean()
df_content
code
122264561/cell_92
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_react
code
122264561/cell_91
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_profile = df_profile.drop('Unnamed: 0', axis=1)
df_profile2 = df_profile.explode('Interests')
df_profile2
code
122264561/cell_65
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content['type_sentiment'].value_counts()
code
122264561/cell_48
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment
code
122264561/cell_73
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
df_content.groupby('Type')['Type_score'].mean()
df_content
code
122264561/cell_61
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
code
122264561/cell_72
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
df_content.groupby('Type')['Type_score'].mean()
code
122264561/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['Type'].value_counts()
code
122264561/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_content['Category'].value_counts()
code
122264561/cell_69
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content['type_sentiment'].value_counts() / 10
code
122264561/cell_86
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_profile = df_profile.drop('Unnamed: 0', axis=1)
df_profile
code
122264561/cell_52
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
df_cont_score
code
122264561/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122264561/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['Content ID'].value_counts()
code
122264561/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
code
122264561/cell_82
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plt.figure/plt.savefig calls below
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
df_content.groupby('Type')['Type_score'].mean()

data = df_content['Category'].value_counts().index
data = data[:19]
index = df_content[df_content['Category'].isin(data)].groupby('Category')['Type_score'].mean().sort_values().index[:19]
plt.figure(figsize=(12, 6), dpi=200)
sns.barplot(x='Category', y='Type_score', data=df_content[df_content['Category'].isin(data)], order=index)
plt.xticks(rotation=90)
plt.xlabel('Category')
plt.ylabel('Reaction Score')
plt.title('Content Category vs Reaction Score')
plt.savefig('Content Category vs Mean Reaction Score.jpeg', bbox_inches='tight')
code
122264561/cell_51
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_sentiment
code
122264561/cell_62
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content
code
122264561/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_rtypes2 = df_rtypes[['Type', 'Score']]
df_rtypes2 = df_rtypes2.set_index('Type')
my_map = df_rtypes2.to_dict().get('Score')
my_map
code
122264561/cell_95
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_react_datetime = df_react.set_index('Datetime').drop('Unnamed: 0', axis=1)
df_react_datetime
code
122264561/cell_80
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
df_content.groupby('Type')['Type_score'].mean()
data = df_content['Category'].value_counts().index
data = data[:19]
data
code
122264561/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])  # parse timestamps so the .dt accessor works
df_react['Datetime'].dt.date.max()
code
122264561/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['Datetime']
code
122264561/cell_66
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content['type_sentiment'].value_counts()
code
122264561/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react
code
122264561/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
df_react[df_react['Type_score'].isnull()].isnull().sum()
code
122264561/cell_77
[ "text_plain_output_1.png" ]
code
122264561/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_react['type_sentiment'].value_counts() / 245.73
code
122264561/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
mode_counts
code
122264561/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])  # parse timestamps so the .dt accessor works
df_react['Datetime'].dt.date.min()
code
122264561/cell_97
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_react_datetime = df_react.set_index('Datetime').drop('Unnamed: 0', axis=1)
counts = df_react.groupby(pd.Grouper(key='Datetime', freq='M')).count()
counts['Type']
code
122264561/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react['User ID'].value_counts()
code
122264561/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
my_map2 = df_rtypes.set_index('Type').to_dict().get('Sentiment')
my_map2
code
122264561/cell_37
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_react.isnull().sum()
df_react = df_react.dropna()
df_react
code
122264561/cell_71
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plt.figure/plt.savefig calls below
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()

plt.figure(figsize=(10, 6), dpi=200)
sns.countplot(x='type_sentiment', data=df_content)
plt.savefig('Mode Reaction Distribution Of Content.jpeg')
code
122264561/cell_70
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_profile = pd.read_csv('/kaggle/input/a360-internship-practice/Profile.csv')
df_location = pd.read_csv('/kaggle/input/a360-internship-practice/Location.csv')
df_content = pd.read_csv('/kaggle/input/a360-internship-practice/Content.csv')
df_rtypes = pd.read_csv('/kaggle/input/a360-internship-practice/ReactionTypes.csv')
df_react = pd.read_csv('/kaggle/input/a360-internship-practice/Reactions.csv')
df_session = pd.read_csv('/kaggle/input/a360-internship-practice/Session.csv')
df_user = pd.read_csv('/kaggle/input/a360-internship-practice/User.csv')
df_react['Datetime'] = pd.to_datetime(df_react['Datetime'])
df_content = df_content.drop(['Unnamed: 0'], axis=1)
df_react.isnull().sum()
df_react = df_react.dropna()
df_react_users = df_react.drop(['Unnamed: 0', 'Content ID', 'Type'], axis=1)
df_cont_score = pd.DataFrame(df_react.groupby('Content ID')['Type_score'].mean())
mode_counts = df_react.groupby('Content ID')['type_sentiment'].agg(lambda x: x.mode().iloc[0])
selected_mode = mode_counts.index[0]
df_cont_sentiment = pd.DataFrame(mode_counts)
df_cont_sentiment = df_cont_sentiment.reset_index()
df_cont_score = df_cont_score.reset_index()
df_content = pd.merge(df_content, df_cont_score, how='outer')
df_content = pd.merge(df_content, df_cont_sentiment, how='outer')
df_content.fillna(0)
df_content.isnull().sum()
code
122264561/cell_85
[ "application_vnd.jupyter.stderr_output_1.png" ]
# expects a DataFrame `df` with a list-valued column; explode() emits one row per list element
df.explode('my_list_column')
code
325724/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import csr_matrix, hstack
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
code
325724/cell_7
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.preprocessing import LabelEncoder
import numpy as np
import os
import pandas as pd

datadir = '../input'
gatrain = pd.read_csv(os.path.join(datadir, 'gender_age_train.csv'), index_col='device_id')
gatest = pd.read_csv(os.path.join(datadir, 'gender_age_test.csv'), index_col='device_id')
phone = pd.read_csv(os.path.join(datadir, 'phone_brand_device_model.csv'))
phone = phone.drop_duplicates('device_id', keep='first').set_index('device_id')
events = pd.read_csv(os.path.join(datadir, 'events.csv'), parse_dates=['timestamp'], index_col='event_id')
appevents = pd.read_csv(os.path.join(datadir, 'app_events.csv'), usecols=['event_id', 'app_id', 'is_active'], dtype={'is_active': bool})
applabels = pd.read_csv(os.path.join(datadir, 'app_labels.csv'))
labelcats = pd.read_csv(os.path.join(datadir, 'label_categories.csv'), index_col='label_id', squeeze=True)
gatrain['trainrow'] = np.arange(gatrain.shape[0])
gatest['testrow'] = np.arange(gatest.shape[0])
brandencoder = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = brandencoder.transform(phone['phone_brand'])
gatrain['brand'] = phone['brand']
gatest['brand'] = phone['brand']
Xtr_brand = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand)))
Xte_brand = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand)))
print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
code
325724/cell_8
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.preprocessing import LabelEncoder
import numpy as np
import os
import pandas as pd

datadir = '../input'
gatrain = pd.read_csv(os.path.join(datadir, 'gender_age_train.csv'), index_col='device_id')
gatest = pd.read_csv(os.path.join(datadir, 'gender_age_test.csv'), index_col='device_id')
phone = pd.read_csv(os.path.join(datadir, 'phone_brand_device_model.csv'))
phone = phone.drop_duplicates('device_id', keep='first').set_index('device_id')
events = pd.read_csv(os.path.join(datadir, 'events.csv'), parse_dates=['timestamp'], index_col='event_id')
appevents = pd.read_csv(os.path.join(datadir, 'app_events.csv'), usecols=['event_id', 'app_id', 'is_active'], dtype={'is_active': bool})
applabels = pd.read_csv(os.path.join(datadir, 'app_labels.csv'))
labelcats = pd.read_csv(os.path.join(datadir, 'label_categories.csv'), index_col='label_id', squeeze=True)
gatrain['trainrow'] = np.arange(gatrain.shape[0])
gatest['testrow'] = np.arange(gatest.shape[0])
brandencoder = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = brandencoder.transform(phone['phone_brand'])
gatrain['brand'] = phone['brand']
gatest['brand'] = phone['brand']
Xtr_brand = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand)))
Xte_brand = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand)))
m = phone.phone_brand.str.cat(phone.device_model)
modelencoder = LabelEncoder().fit(m)
phone['model'] = modelencoder.transform(m)
gatrain['model'] = phone['model']
gatest['model'] = phone['model']
Xtr_model = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model)))
Xte_model = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.model)))
print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape))
code
325724/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.preprocessing import LabelEncoder
import numpy as np
import os
import pandas as pd

datadir = '../input'
gatrain = pd.read_csv(os.path.join(datadir, 'gender_age_train.csv'), index_col='device_id')
gatest = pd.read_csv(os.path.join(datadir, 'gender_age_test.csv'), index_col='device_id')
phone = pd.read_csv(os.path.join(datadir, 'phone_brand_device_model.csv'))
phone = phone.drop_duplicates('device_id', keep='first').set_index('device_id')
events = pd.read_csv(os.path.join(datadir, 'events.csv'), parse_dates=['timestamp'], index_col='event_id')
appevents = pd.read_csv(os.path.join(datadir, 'app_events.csv'), usecols=['event_id', 'app_id', 'is_active'], dtype={'is_active': bool})
applabels = pd.read_csv(os.path.join(datadir, 'app_labels.csv'))
labelcats = pd.read_csv(os.path.join(datadir, 'label_categories.csv'), index_col='label_id', squeeze=True)
gatrain['trainrow'] = np.arange(gatrain.shape[0])
gatest['testrow'] = np.arange(gatest.shape[0])
brandencoder = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = brandencoder.transform(phone['phone_brand'])
gatrain['brand'] = phone['brand']
gatest['brand'] = phone['brand']
Xtr_brand = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand)))
Xte_brand = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand)))
m = phone.phone_brand.str.cat(phone.device_model)
modelencoder = LabelEncoder().fit(m)
phone['model'] = modelencoder.transform(m)
gatrain['model'] = phone['model']
gatest['model'] = phone['model']
Xtr_model = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model)))
Xte_model = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.model)))
appencoder = LabelEncoder().fit(appevents.app_id)
appevents['app'] = appencoder.transform(appevents.app_id)
napps = len(appencoder.classes_)
deviceapps = (appevents.merge(events[['device_id']], how='left', left_on='event_id', right_index=True)
              .groupby(['device_id', 'app'])['app'].agg(['size'])
              .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True)
              .merge(gatest[['testrow']], how='left', left_index=True, right_index=True)
              .reset_index())
d = deviceapps.dropna(subset=['trainrow'])
Xtr_app = csr_matrix((np.ones(d.shape[0]), (d.trainrow, d.app)), shape=(gatrain.shape[0], napps))
d = deviceapps.dropna(subset=['testrow'])
Xte_app = csr_matrix((np.ones(d.shape[0]), (d.testrow, d.app)), shape=(gatest.shape[0], napps))
print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape))
code
327693/cell_13
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import tensorflow as tf

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_val_ratio = 0.7
train_data_size = len(train_data)
train_set = train_data[:int(train_data_size * train_val_ratio)]
val_set = train_data[int(train_data_size * train_val_ratio) + 1:]

init = tf.initialize_all_variables()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
train_eval_list = []
val_eval_list = []
for i in range(100):
    batch = train_set.sample(frac=0.1)
    batch_xs = batch.drop('label', axis=1).as_matrix() / 255.0
    batch_ys = pd.get_dummies(batch['label']).as_matrix()
    val_xs = val_set.drop('label', axis=1).as_matrix() / 255.0
    val_ys = pd.get_dummies(val_set['label']).as_matrix()
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    train_eval = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
    val_eval = sess.run(accuracy, feed_dict={x: val_xs, y_: val_ys})
    train_eval_list.append(train_eval)
    val_eval_list.append(val_eval)
code
327693/cell_15
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')

f, axarr = plt.subplots(10, 10)
for row in range(10):
    for column in range(10):
        entry = train_data[train_data['label'] == column].iloc[row].drop('label').as_matrix()
        axarr[row, column].imshow(entry.reshape([28, 28]))
        axarr[row, column].get_xaxis().set_visible(False)
        axarr[row, column].get_yaxis().set_visible(False)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_val_ratio = 0.7
train_data_size = len(train_data)
train_set = train_data[:int(train_data_size * train_val_ratio)]
val_set = train_data[int(train_data_size * train_val_ratio) + 1:]

init = tf.initialize_all_variables()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
train_eval_list = []
val_eval_list = []
for i in range(100):
    batch = train_set.sample(frac=0.1)
    batch_xs = batch.drop('label', axis=1).as_matrix() / 255.0
    batch_ys = pd.get_dummies(batch['label']).as_matrix()
    val_xs = val_set.drop('label', axis=1).as_matrix() / 255.0
    val_ys = pd.get_dummies(val_set['label']).as_matrix()
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    train_eval = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
    val_eval = sess.run(accuracy, feed_dict={x: val_xs, y_: val_ys})
    train_eval_list.append(train_eval)
    val_eval_list.append(val_eval)

plt.plot(train_eval_list, label='train set')
plt.plot(val_eval_list, label='validation set')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
code
327693/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import tensorflow as tf

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_val_ratio = 0.7
train_data_size = len(train_data)
train_set = train_data[:int(train_data_size * train_val_ratio)]
val_set = train_data[int(train_data_size * train_val_ratio) + 1:]

init = tf.initialize_all_variables()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
train_eval_list = []
val_eval_list = []
for i in range(100):
    batch = train_set.sample(frac=0.1)
    batch_xs = batch.drop('label', axis=1).as_matrix() / 255.0
    batch_ys = pd.get_dummies(batch['label']).as_matrix()
    val_xs = val_set.drop('label', axis=1).as_matrix() / 255.0
    val_ys = pd.get_dummies(val_set['label']).as_matrix()
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    train_eval = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
    val_eval = sess.run(accuracy, feed_dict={x: val_xs, y_: val_ys})
    train_eval_list.append(train_eval)
    val_eval_list.append(val_eval)

saver.save(sess, 'logistic_regression.ckpt')
sess.close()
code
327693/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
print(train_data.shape)
print(test_data.shape)
code
327693/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')

f, axarr = plt.subplots(10, 10)
for row in range(10):
    for column in range(10):
        entry = train_data[train_data['label'] == column].iloc[row].drop('label').as_matrix()
        axarr[row, column].imshow(entry.reshape([28, 28]))
        axarr[row, column].get_xaxis().set_visible(False)
        axarr[row, column].get_yaxis().set_visible(False)
code
48165682/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')
plt.style.use('fivethirtyeight')
df = pd.read_csv('../input/netflix-shows/netflix_titles.csv')
df.isnull().sum()
df = df.dropna()
df_shows = df[df['type'] == 'TV Show']
df_movies = df[df['type'] == 'Movie']
df_date = df_shows[['date_added']].dropna()
df_date['year'] = df_date['date_added'].apply(lambda x: x.split(', ')[-1])
df_date['month'] = df_date['date_added'].apply(lambda x: x.lstrip().split(' ')[0])
month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'][::-1]
dfu = df_date.groupby('year')['month'].value_counts().unstack().fillna(0)[month_order].T
plt.pcolor(dfu, cmap='afmhot_r', edgecolors='white', linewidths=2)
plt.xticks(np.arange(0.5, len(dfu.columns), 1), dfu.columns, fontsize=7, fontfamily='serif')
plt.yticks(np.arange(0.5, len(dfu.index), 1), dfu.index, fontsize=7, fontfamily='serif')
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=8)
cbar.ax.minorticks_on()
df['date_added'] = pd.to_datetime(df['date_added'])
df['day_added'] = df['date_added'].dt.day
df['year_added'] = df['date_added'].dt.year
df['month_added'] = df['date_added'].dt.month
df['year_added'].astype(int)
df['day_added'].astype(int)

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['type'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Type of Movie')
ax[0].set_ylabel('Count')
sns.countplot('type', data=df, ax=ax[1], order=df['type'].value_counts().index)
ax[1].set_title('Count of Source')
plt.show()

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['rating'].value_counts().plot.pie(autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Movie Rating')
ax[0].set_ylabel('Count')
sns.countplot('rating', data=df, ax=ax[1], order=df['rating'].value_counts().index)
ax[1].set_title('Count of Rating')
plt.show()
code
48165682/cell_4
[ "image_output_1.png" ]
!pip install plotly
!pip install cufflinks
!pip install textblob
code
48165682/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
48165682/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/netflix-shows/netflix_titles.csv')
df.isnull().sum()
df = df.dropna()
df['date_added'] = pd.to_datetime(df['date_added'])
df['day_added'] = df['date_added'].dt.day
df['year_added'] = df['date_added'].dt.year
df['month_added'] = df['date_added'].dt.month
df['year_added'].astype(int)
df['day_added'].astype(int)
df.head()
code
48165682/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')
plt.style.use('fivethirtyeight')
df = pd.read_csv('../input/netflix-shows/netflix_titles.csv')
df.isnull().sum()
df = df.dropna()
df_shows = df[df['type'] == 'TV Show']
df_movies = df[df['type'] == 'Movie']
df_date = df_shows[['date_added']].dropna()
df_date['year'] = df_date['date_added'].apply(lambda x: x.split(', ')[-1])
df_date['month'] = df_date['date_added'].apply(lambda x: x.lstrip().split(' ')[0])
month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'][::-1]
dfu = df_date.groupby('year')['month'].value_counts().unstack().fillna(0)[month_order].T
plt.pcolor(dfu, cmap='afmhot_r', edgecolors='white', linewidths=2)
plt.xticks(np.arange(0.5, len(dfu.columns), 1), dfu.columns, fontsize=7, fontfamily='serif')
plt.yticks(np.arange(0.5, len(dfu.index), 1), dfu.index, fontsize=7, fontfamily='serif')
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=8)
cbar.ax.minorticks_on()
df['date_added'] = pd.to_datetime(df['date_added'])
df['day_added'] = df['date_added'].dt.day
df['year_added'] = df['date_added'].dt.year
df['month_added'] = df['date_added'].dt.month
df['year_added'].astype(int)
df['day_added'].astype(int)

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['type'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Type of Movie')
ax[0].set_ylabel('Count')
sns.countplot('type', data=df, ax=ax[1], order=df['type'].value_counts().index)
ax[1].set_title('Count of Source')
plt.show()

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['rating'].value_counts().plot.pie(autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Movie Rating')
ax[0].set_ylabel('Count')
sns.countplot('rating', data=df, ax=ax[1], order=df['rating'].value_counts().index)
ax[1].set_title('Count of Rating')
plt.show()

group_country_movies = df.groupby('country')['show_id'].count().sort_values(ascending=False).head(10)
plt.ioff()
# the variable is reused here for the per-year counts
group_country_movies = df.groupby('year_added')['show_id'].count().sort_values(ascending=False).head(10)
plt.subplots(figsize=(15, 8))
group_country_movies.plot('bar', fontsize=12, color='blue')
plt.xlabel('Year', fontsize=12)
plt.ylabel('Number of Movies', fontsize=12)
plt.title('Movie Count By Year', fontsize=12)
plt.ioff()
code
48165682/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')
plt.style.use('fivethirtyeight')
df = pd.read_csv('../input/netflix-shows/netflix_titles.csv')
df.isnull().sum()
df = df.dropna()
df_shows = df[df['type'] == 'TV Show']
df_movies = df[df['type'] == 'Movie']
df_date = df_shows[['date_added']].dropna()
df_date['year'] = df_date['date_added'].apply(lambda x: x.split(', ')[-1])
df_date['month'] = df_date['date_added'].apply(lambda x: x.lstrip().split(' ')[0])
month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'][::-1]
dfu = df_date.groupby('year')['month'].value_counts().unstack().fillna(0)[month_order].T
plt.pcolor(dfu, cmap='afmhot_r', edgecolors='white', linewidths=2)
plt.xticks(np.arange(0.5, len(dfu.columns), 1), dfu.columns, fontsize=7, fontfamily='serif')
plt.yticks(np.arange(0.5, len(dfu.index), 1), dfu.index, fontsize=7, fontfamily='serif')
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=8)
cbar.ax.minorticks_on()
df['date_added'] = pd.to_datetime(df['date_added'])
df['day_added'] = df['date_added'].dt.day
df['year_added'] = df['date_added'].dt.year
df['month_added'] = df['date_added'].dt.month
df['year_added'].astype(int)
df['day_added'].astype(int)

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['type'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Type of Movie')
ax[0].set_ylabel('Count')
sns.countplot('type', data=df, ax=ax[1], order=df['type'].value_counts().index)
ax[1].set_title('Count of Source')
plt.show()

f, ax = plt.subplots(1, 2, figsize=(18, 8))
df['rating'].value_counts().plot.pie(autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Movie Rating')
ax[0].set_ylabel('Count')
sns.countplot('rating', data=df, ax=ax[1], order=df['rating'].value_counts().index)
ax[1].set_title('Count of Rating')
plt.show()

group_country_movies = df.groupby('country')['show_id'].count().sort_values(ascending=False).head(10)
plt.subplots(figsize=(15, 8))
group_country_movies.plot('bar', fontsize=12, color='blue')
plt.xlabel('Country', fontsize=12)
plt.ylabel('Number of Movies', fontsize=12)
plt.title('Movie count by Country', fontsize=12)
plt.ioff()
code
48165682/cell_8
[ "image_output_1.png" ]
import cufflinks as cf import plotly as py py.offline.init_notebook_mode(connected=True) cf.go_offline()
code
48165682/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') df = pd.read_csv('../input/netflix-shows/netflix_titles.csv') df.isnull().sum() df = df.dropna() df_shows = df[df['type'] == 'TV Show'] df_movies = df[df['type'] == 'Movie'] df_date = df_shows[['date_added']].dropna() df_date['year'] = df_date['date_added'].apply(lambda x: x.split(', ')[-1]) df_date['month'] = df_date['date_added'].apply(lambda x: x.lstrip().split(' ')[0]) month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'][::-1] dfu = df_date.groupby('year')['month'].value_counts().unstack().fillna(0)[month_order].T plt.figure(figsize=(10, 7), dpi=200) plt.pcolor(dfu, cmap='afmhot_r', edgecolors='white', linewidths=2) plt.xticks(np.arange(0.5, len(dfu.columns), 1), dfu.columns, fontsize=7, fontfamily='serif') plt.yticks(np.arange(0.5, len(dfu.index), 1), dfu.index, fontsize=7, fontfamily='serif') plt.title('Netflix Contents Update', fontsize=12, fontfamily='calibri', fontweight='bold', position=(0.2, 1.0 + 0.02)) cbar = plt.colorbar() cbar.ax.tick_params(labelsize=8) cbar.ax.minorticks_on() plt.show()
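# Note: month_order is reversed before indexing and pcolor draws row 0 at the bottom, so January ends up at the top of the heatmap.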
code
48165682/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') df = pd.read_csv('../input/netflix-shows/netflix_titles.csv') df.isnull().sum() df = df.dropna() df_shows = df[df['type'] == 'TV Show'] df_movies = df[df['type'] == 'Movie'] df_date = df_shows[['date_added']].dropna() df_date['year'] = df_date['date_added'].apply(lambda x: x.split(', ')[-1]) df_date['month'] = df_date['date_added'].apply(lambda x: x.lstrip().split(' ')[0]) month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'][::-1] dfu = df_date.groupby('year')['month'].value_counts().unstack().fillna(0)[month_order].T plt.pcolor(dfu, cmap='afmhot_r', edgecolors='white', linewidths=2) plt.xticks(np.arange(0.5, len(dfu.columns), 1), dfu.columns, fontsize=7, fontfamily='serif') plt.yticks(np.arange(0.5, len(dfu.index), 1), dfu.index, fontsize=7, fontfamily='serif') cbar = plt.colorbar() cbar.ax.tick_params(labelsize=8) cbar.ax.minorticks_on() df['date_added'] = pd.to_datetime(df['date_added']) df['day_added'] = df['date_added'].dt.day df['year_added'] = df['date_added'].dt.year df['month_added'] = df['date_added'].dt.month df['year_added'].astype(int) df['day_added'].astype(int) f, ax = plt.subplots(1, 2, figsize=(18, 8)) df['type'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True) ax[0].set_title('Type of Movie') ax[0].set_ylabel('Count') sns.countplot('type', data=df, ax=ax[1], order=df['type'].value_counts().index) ax[1].set_title('Count of Source') plt.show()
code
48165682/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/netflix-shows/netflix_titles.csv') df.head()
code
48165682/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/netflix-shows/netflix_titles.csv') print('Rows :', df.shape[0]) print('Columns :', df.shape[1]) print('\nFeatures :\n :', df.columns.tolist()) print('\nMissing values :', df.isnull().values.sum()) print('\nUnique values : \n', df.nunique())
code
72075672/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import mean_absolute_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor import pandas as pd import pandas as pd from sklearn.model_selection import train_test_split X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id') X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id') X_full.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X_full.SalePrice X_full.drop(['SalePrice'], axis=1, inplace=True) X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0) categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object'] numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_cols = categorical_cols + numerical_cols X_train = X_train_full[my_cols].copy() X_valid = X_valid_full[my_cols].copy() X_test = X_test_full[my_cols].copy() X_train = pd.get_dummies(X_train) X_valid = pd.get_dummies(X_valid) X_test = pd.get_dummies(X_test) X_train, X_valid = X_train.align(X_valid, join='left', axis=1) X_train, X_test = X_train.align(X_test, join='left', axis=1) from xgboost import XGBRegressor from sklearn.metrics import mean_absolute_error my_model = XGBRegressor(n_estimators=900, learning_rate=0.09) my_model.fit(X_train, y_train) preds = my_model.predict(X_valid) mae = mean_absolute_error(preds, y_valid) print('Mean Absolute Error:', mae)
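# Note: align(join='left', axis=1) keeps only the training set's dummy columns; columns missing from the validation/test frames become NaN, which XGBoost handles natively.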
code
72075672/cell_2
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd import pandas as pd from sklearn.model_selection import train_test_split X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id') X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id') X_full.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X_full.SalePrice X_full.drop(['SalePrice'], axis=1, inplace=True) X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0) categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object'] numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_cols = categorical_cols + numerical_cols X_train = X_train_full[my_cols].copy() X_valid = X_valid_full[my_cols].copy() X_test = X_test_full[my_cols].copy() X_train.head()
code
122254015/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
a = 2 b = 4 c = 5 d = a + b + c type(d) a = 2 b = 3 c = a a = b b = c print(a, b) type(b) type(a)
code
122254015/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
Sales_Store_A = input('Put your sale')
code
122254015/cell_2
[ "text_plain_output_1.png" ]
Revenue = 1200 CostofSales = 750 CostofMarketing = 100 NetProfitMargin = int((Revenue - (CostofSales + CostofMarketing)) / Revenue * 100) print(NetProfitMargin, 'Percentage')
code
122254015/cell_1
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
a = 2 b = 4 c = 5 d = a + b + c print(d) type(d)
code
104113435/cell_23
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical from scipy import stats from sklearn.model_selection import train_test_split from sklearn.utils import resample import csv import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 signals = denoise(signals) signals = stats.zscore(signals) example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): example_beat_printed = True X.append(beat) y.append(arrhythmia_index) for i in range(0, len(X)): X[i] = np.append(X[i], y[i]) X_train_df = pd.DataFrame(X) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() my_circle = plt.Circle((0, 0), 0.7, color='white') p = plt.gcf() p.gca().add_artist(my_circle) df_1 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 1] df_2 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 2] df_3 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 3] df_4 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 4] df_0 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 0].sample(n=5000, random_state=42) df_1_upsample = resample(df_1, replace=True, n_samples=5000, random_state=122) df_2_upsample = resample(df_2, replace=True, n_samples=5000, random_state=123) df_3_upsample = resample(df_3, replace=True, n_samples=5000, random_state=124) df_4_upsample = resample(df_4, replace=True, n_samples=5000, random_state=125) X_train_df = pd.concat([df_0, df_1_upsample, df_2_upsample, df_3_upsample, df_4_upsample]) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() my_circle = plt.Circle((0, 0), 0.7, color='white') p = plt.gcf() p.gca().add_artist(my_circle) train, test = train_test_split(X_train_df, test_size=0.2) target_train = train[train.shape[1] - 1] target_test = test[test.shape[1] - 1] train_y = to_categorical(target_train) test_y = to_categorical(target_test) print(np.shape(train_y), np.shape(test_y))
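# Note: to_categorical one-hot encodes the five integer class labels, so train_y and test_y have shape (n_samples, 5).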
code
104113435/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy import stats from sklearn.utils import resample import csv import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 signals = denoise(signals) signals = stats.zscore(signals) example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): example_beat_printed = True X.append(beat) y.append(arrhythmia_index) X_train_df = pd.DataFrame(X) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() my_circle = plt.Circle((0, 0), 0.7, color='white') p = plt.gcf() p.gca().add_artist(my_circle) df_1 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 1] df_2 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 2] df_3 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 3] df_4 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 4] df_0 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 0].sample(n=5000, random_state=42) df_1_upsample = resample(df_1, replace=True, n_samples=5000, random_state=122) df_2_upsample = resample(df_2, replace=True, n_samples=5000, random_state=123) df_3_upsample = resample(df_3, replace=True, n_samples=5000, random_state=124) df_4_upsample = resample(df_4, replace=True, n_samples=5000, random_state=125) X_train_df = pd.concat([df_0, df_1_upsample, df_2_upsample, df_3_upsample, df_4_upsample]) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() print(per_class) plt.figure(figsize=(20, 10)) my_circle = plt.Circle((0, 0), 0.7, color='white') plt.pie(per_class, labels=['N', 'L', 'R', 'A', 'V'], colors=['tab:blue', 'tab:orange', 'tab:purple', 'tab:olive', 'tab:green'], autopct='%1.1f%%') p = plt.gcf() p.gca().add_artist(my_circle) plt.show()
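# Note: resample(replace=True, n_samples=5000) upsamples each minority class with replacement, so all five beat classes contribute 5000 rows to the balanced pie chart.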
code
104113435/cell_15
[ "text_plain_output_1.png" ]
from scipy import stats import csv import matplotlib.pyplot as plt import numpy as np import os import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 signals = denoise(signals) signals = stats.zscore(signals) example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): example_beat_printed = True X.append(beat) y.append(arrhythmia_index) for i in range(0, len(X)): X[i] = np.append(X[i], y[i]) print(np.shape(X))
code
104113435/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy import stats import csv import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 signals = denoise(signals) signals = stats.zscore(signals) example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): example_beat_printed = True X.append(beat) y.append(arrhythmia_index) X_train_df = pd.DataFrame(X) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() print(per_class) plt.figure(figsize=(20, 10)) my_circle = plt.Circle((0, 0), 0.7, color='white') plt.pie(per_class, labels=['N', 'L', 'R', 'A', 'V'], colors=['tab:blue', 'tab:orange', 'tab:purple', 'tab:olive', 'tab:green'], autopct='%1.1f%%') p = plt.gcf() p.gca().add_artist(my_circle) plt.show()
code
104113435/cell_14
[ "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from scipy import stats import csv import matplotlib.pyplot as plt import numpy as np import os import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 if r == 1: plt.title(records[1] + ' Wave') plt.plot(signals[0:700]) plt.show() signals = denoise(signals) if r == 1: plt.title(records[1] + ' wave after denoised') plt.plot(signals[0:700]) plt.show() signals = stats.zscore(signals) if r == 1: plt.title(records[1] + ' wave after z-score normalization ') plt.plot(signals[0:700]) plt.show() example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): plt.title('A Beat from ' + records[1] + ' Wave') plt.plot(beat) plt.show() example_beat_printed = True X.append(beat) y.append(arrhythmia_index) print(np.shape(X), np.shape(y))
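# Note: for record 1 this cell plots the raw wave, the sym4 wavelet-denoised wave, the z-score normalized wave, and one extracted 360-sample beat window.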
code
104113435/cell_22
[ "text_plain_output_1.png" ]
from scipy import stats from sklearn.model_selection import train_test_split from sklearn.utils import resample import csv import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pywt plt.rcParams['figure.figsize'] = (30, 6) plt.rcParams['lines.linewidth'] = 1 plt.rcParams['lines.color'] = 'b' plt.rcParams['axes.grid'] = True def denoise(data): w = pywt.Wavelet('sym4') maxlev = pywt.dwt_max_level(len(data), w.dec_len) threshold = 0.04 coeffs = pywt.wavedec(data, 'sym4', level=maxlev) for i in range(1, len(coeffs)): coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i])) datarec = pywt.waverec(coeffs, 'sym4') return datarec path = '/kaggle/input/mitbit-arrhythmia-database/mitbih_database/' window_size = 180 maximum_counting = 10000 classes = ['N', 'L', 'R', 'A', 'V'] n_classes = len(classes) count_classes = [0] * n_classes X = list() y = list() filenames = next(os.walk(path))[2] records = list() annotations = list() filenames.sort() for f in filenames: filename, file_extension = os.path.splitext(f) if file_extension == '.csv': records.append(path + filename + file_extension) else: annotations.append(path + filename + file_extension) for r in range(0, len(records)): signals = [] with open(records[r], 'rt') as csvfile: spamreader = csv.reader(csvfile, delimiter=',', quotechar='|') row_index = -1 for row in spamreader: if row_index >= 0: signals.insert(row_index, int(row[1])) row_index += 1 signals = denoise(signals) signals = stats.zscore(signals) example_beat_printed = False with open(annotations[r], 'r') as fileID: data = fileID.readlines() beat = list() for d in range(1, len(data)): splitted = data[d].split(' ') splitted = filter(None, splitted) next(splitted) pos = int(next(splitted)) arrhythmia_type = next(splitted) if arrhythmia_type in classes: arrhythmia_index = classes.index(arrhythmia_type) count_classes[arrhythmia_index] += 1 if window_size <= pos and pos < len(signals) - window_size: beat = signals[pos - window_size:pos + window_size] if r == 1 and (not example_beat_printed): example_beat_printed = True X.append(beat) y.append(arrhythmia_index) for i in range(0, len(X)): X[i] = np.append(X[i], y[i]) X_train_df = pd.DataFrame(X) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() my_circle = plt.Circle((0, 0), 0.7, color='white') p = plt.gcf() p.gca().add_artist(my_circle) df_1 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 1] df_2 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 2] df_3 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 3] df_4 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 4] df_0 = X_train_df[X_train_df[X_train_df.shape[1] - 1] == 0].sample(n=5000, random_state=42) df_1_upsample = resample(df_1, replace=True, n_samples=5000, random_state=122) df_2_upsample = resample(df_2, replace=True, n_samples=5000, random_state=123) df_3_upsample = resample(df_3, replace=True, n_samples=5000, random_state=124) df_4_upsample = resample(df_4, replace=True, n_samples=5000, random_state=125) X_train_df = pd.concat([df_0, df_1_upsample, df_2_upsample, df_3_upsample, df_4_upsample]) per_class = X_train_df[X_train_df.shape[1] - 1].value_counts() my_circle = plt.Circle((0, 0), 0.7, color='white') p = plt.gcf() p.gca().add_artist(my_circle) train, test = train_test_split(X_train_df, test_size=0.2) print('X_train : ', np.shape(train)) print('X_test : ', np.shape(test))
code
129014767/cell_13
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number']) list = [['a', 25], ['b', 30], ['c', 26], ['d', 22]] pd.DataFrame(list, columns=['Alphabet', 'Number']) dict = {'Alphabet': ['a', 'b', 'c', 'd', 'e'], 'Number': [1, 2, 3, 4, 5]} pd.DataFrame(dict)
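# Caution: 'list' and 'dict' are used as variable names here, shadowing the Python built-ins of the same names for the rest of the notebook.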
code
129014767/cell_4
[ "text_html_output_1.png" ]
import pandas as pd pd.Series([1, 2, 3, 4, 5])
code
129014767/cell_6
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series
code
129014767/cell_11
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number'])
code
129014767/cell_7
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series
code
129014767/cell_8
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series
code
129014767/cell_15
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number']) list = [['a', 25], ['b', 30], ['c', 26], ['d', 22]] pd.DataFrame(list, columns=['Alphabet', 'Number']) dict = {'Alphabet': ['a', 'b', 'c', 'd', 'e'], 'Number': [1, 2, 3, 4, 5]} pd.DataFrame(dict) array = np.array([1, 2, 3, 4, 5]) pd.DataFrame(array, columns=['Number']) array = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]]) pd.DataFrame(array, columns=['Number', 'Square'])
code
129014767/cell_16
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number']) list = [['a', 25], ['b', 30], ['c', 26], ['d', 22]] pd.DataFrame(list, columns=['Alphabet', 'Number']) dict = {'Alphabet': ['a', 'b', 'c', 'd', 'e'], 'Number': [1, 2, 3, 4, 5]} pd.DataFrame(dict) array = np.array([1, 2, 3, 4, 5]) pd.DataFrame(array, columns=['Number']) array = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]]) pd.DataFrame(array, columns=['Number', 'Square']) pd.DataFrame(range(5))
code
129014767/cell_14
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number']) list = [['a', 25], ['b', 30], ['c', 26], ['d', 22]] pd.DataFrame(list, columns=['Alphabet', 'Number']) dict = {'Alphabet': ['a', 'b', 'c', 'd', 'e'], 'Number': [1, 2, 3, 4, 5]} pd.DataFrame(dict) array = np.array([1, 2, 3, 4, 5]) pd.DataFrame(array, columns=['Number'])
code
129014767/cell_10
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5])
code
129014767/cell_12
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series array = np.array([10, 20, 30, 40, 50]) series = pd.Series(array) series dict = {'a': 10, 'b': 20, 'd': 30} series = pd.Series(dict) series series = pd.Series(range(15)) series pd.DataFrame([1, 2, 3, 4, 5]) pd.DataFrame([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'], columns=['Number']) list = [['a', 25], ['b', 30], ['c', 26], ['d', 22]] pd.DataFrame(list, columns=['Alphabet', 'Number'])
code
129014767/cell_5
[ "text_html_output_1.png" ]
import pandas as pd pd.Series([1, 2, 3, 4, 5]) list = [1, 2, 3, 4, 5] series = pd.Series(list) series
code
105181512/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np df = pd.read_csv('haberman.csv') df.head()
code
17105960/cell_13
[ "text_plain_output_1.png" ]
import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) correlated_values['abs_correlation'] = correlated_values.Correlations.abs() train_fields = correlated_values.sort_values('Correlations', ascending=False).query('abs_correlation>0.8') train_fields.sample(5)
code
17105960/cell_4
[ "text_plain_output_1.png" ]
import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') print(train.isnull().values.any()) print(test.isnull().values.any())
code
17105960/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) print(train.dtypes.value_counts()) print(test.dtypes.value_counts())
code
17105960/cell_2
[ "text_html_output_1.png" ]
import os import pandas as pd print(os.listdir('../input')) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
code
17105960/cell_11
[ "text_html_output_1.png" ]
import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) correlated_values.head()
code
17105960/cell_7
[ "text_html_output_1.png" ]
import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) train.describe()
code
17105960/cell_8
[ "text_html_output_1.png" ]
import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity']
code
17105960/cell_15
[ "text_html_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) split_data = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42) train_idx, val_idx = next(split_data.split(train[feature_cols], train.Activity)) x_train = train.loc[train_idx, feature_cols] y_train = train.loc[train_idx, 'Activity'] x_val = train.loc[val_idx, feature_cols] y_val = train.loc[val_idx, 'Activity'] print(y_train.value_counts(normalize=True)) print(y_val.value_counts(normalize=True))
code
17105960/cell_16
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV from sklearn.model_selection import StratifiedShuffleSplit import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) split_data = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42) train_idx, val_idx = next(split_data.split(train[feature_cols], train.Activity)) x_train = train.loc[train_idx, feature_cols] y_train = train.loc[train_idx, 'Activity'] x_val = train.loc[val_idx, feature_cols] y_val = train.loc[val_idx, 'Activity'] lr_l2 = LogisticRegressionCV(cv=4, penalty='l2', max_iter=1000, n_jobs=-1) lr_l2 = lr_l2.fit(x_train, y_train)
code
17105960/cell_3
[ "text_html_output_1.png" ]
import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.head()
code
17105960/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV from sklearn.model_selection import StratifiedShuffleSplit import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) split_data = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42) train_idx, val_idx = next(split_data.split(train[feature_cols], train.Activity)) x_train = train.loc[train_idx, feature_cols] y_train = train.loc[train_idx, 'Activity'] x_val = train.loc[val_idx, feature_cols] y_val = train.loc[val_idx, 'Activity'] lr_l2 = LogisticRegressionCV(cv=4, penalty='l2', max_iter=1000, n_jobs=-1) lr_l2 = lr_l2.fit(x_train, y_train) y_predict = list() y_proba = list() labels = ['lr_l2'] models = [lr_l2] for lab, mod in zip(labels, models): y_predict.append(pd.Series(mod.predict(x_val), name=lab)) y_proba.append(pd.Series(mod.predict_proba(x_val).max(axis=1), name=lab)) y_predict = pd.concat(y_predict, axis=1) y_proba = pd.concat(y_proba, axis=1) y_predict.head()
code
17105960/cell_10
[ "text_plain_output_1.png" ]
import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5)
code
17105960/cell_12
[ "text_plain_output_1.png" ]
import numpy as np import os import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.drop('subject', axis=1, inplace=True) test.drop('subject', axis=1, inplace=True) object_feature = train.dtypes == np.object object_feature = train.columns[object_feature] object_feature train['Activity'] train.Activity.sample(5) feature_cols = train.columns[:-1] correlated_values = train[feature_cols].corr() correlated_values = correlated_values.stack().to_frame().reset_index().rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0: 'Correlations'}) correlated_values['abs_correlation'] = correlated_values.Correlations.abs() correlated_values.head()
code
104126055/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state=0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) print('Accuracy = ', accuracy_score(Y_test, Y_pred) * 100, '%')
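# Note: this cell assumes X_train, Y_train, X_test and Y_test were defined in earlier cells of the notebook (e.g. via train_test_split on the drug dataset).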
code
104126055/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataset = pd.read_csv('../input/drug-classification/drug200.csv') dataset['Sex'].unique()
code