column            type             values
path              stringlengths    13 to 17
screenshot_names  sequencelengths  1 to 873
code              stringlengths    0 to 40.4k
cell_type         stringclasses    1 value
106198134/cell_17
[ "text_html_output_1.png" ]
clean_names(dailyActivity)
code
106198134/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
library(tidyverse)
library(readr)
library(here)
library(skimr)
library(dplyr)
library(janitor)
code
50242358/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pd.set_option('display.max_columns', 50)
import os
from scipy import stats

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print('_' * 20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print('*' * 50, end='\n\n')
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=0.2, palette='Blues_d')

def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
    nunique = df.nunique()
    df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
    nRow, nCol = df.shape
    columnNames = list(df)
    nGraphRow = (nCol + nGraphPerRow - 1) // nGraphPerRow  # ceiling division so every column gets a slot
    for i in range(min(nCol, nGraphShown)):
        columnDf = df.iloc[:, i]
        if not np.issubdtype(type(columnDf.iloc[0]), np.number):
            valueCounts = columnDf.value_counts()
        plt.xticks(rotation=90)
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)

corr = df.corr()
plt.figure(num=None, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix for all Transactions', fontsize=15)
plt.savefig('./correlation.png')
plt.show()
code
50242358/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['education_level'].dropna().unique())} unique education level data.")
print('_' * 20)
print(f"Unique education level:\n{df['education_level'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_9
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city_development_index'].dropna().unique())} unique city development indices.")
print('_' * 20)
print(f"Unique City Development Indices:\n{df['city_development_index'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
df.head()
code
50242358/cell_2
[ "text_plain_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50242358/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['relevent_experience'].dropna().unique())} unique relevant experience data.")
print('_' * 20)
print(f"Unique Experiences:\n{df['relevent_experience'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pd.set_option('display.max_columns', 50)
import os
from scipy import stats

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print('_' * 20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print('*' * 50, end='\n\n')
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=0.2, palette='Blues_d')

def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
    nunique = df.nunique()
    df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
    nRow, nCol = df.shape
    columnNames = list(df)
    nGraphRow = (nCol + nGraphPerRow - 1) // nGraphPerRow  # ceiling division so every column gets a slot
    plt.figure(num=None, figsize=(6 * nGraphPerRow, 8 * nGraphRow), dpi=80, facecolor='w', edgecolor='k')
    for i in range(min(nCol, nGraphShown)):
        plt.subplot(nGraphRow, nGraphPerRow, i + 1)
        columnDf = df.iloc[:, i]
        if not np.issubdtype(type(columnDf.iloc[0]), np.number):
            valueCounts = columnDf.value_counts()
            valueCounts.plot.bar()
        else:
            columnDf.hist()
        plt.ylabel('counts')
        plt.xticks(rotation=90)
        plt.title(f'{columnNames[i]} (column {i})')
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
    plt.show()

plotPerColumnDistribution(df, 10, 5)
code
50242358/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f'Dataset has {df.shape[0]} rows and {df.shape[1]} columns.')
print('*' * 50, end='\n\n')
print(f"Dataset has {len(df['enrollee_id'].dropna().unique())} unique user's data.")
print('*' * 50, end='\n\n')
code
50242358/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print('_' * 20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print('*' * 50, end='\n\n')
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=0.2, palette='Blues_d')
code
50242358/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['experience'].dropna().unique())} unique experience data.")
print('_' * 20)
print(f"Unique experiences:\n{df['experience'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['company_size'].dropna().unique())} unique company sizes.")
print('_' * 20)
print(f"Unique company sizes:\n{df['company_size'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['company_type'].dropna().unique())} unique company types.")
print('_' * 20)
print(f"Unique company types:\n{df['company_type'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['major_discipline'].dropna().unique())} unique major discipline data.")
print('_' * 20)
print(f"Unique major discipline:\n{df['major_discipline'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['gender'].dropna().unique())} unique gender's data.")
print('_' * 20)
print(f"Unique Gender counts:\n{df['gender'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['enrolled_university'].dropna().unique())} unique enrolled university data.")
print('_' * 20)
print(f"Unique enrolled university:\n{df['enrolled_university'].value_counts()}")
print('*' * 50, end='\n\n')
code
50242358/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
df['relevent_experience'].dropna().unique()
code
2029228/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def draw_plot(data, start_range, end_range, total_data, bar_color, chart_title):
    plt.rcdefaults()
    fig, ax = plt.subplots()
    data_filtered = data['title'][(data['score'] > start_range) & (data['score'] < end_range)].drop_duplicates()
    cv = CountVectorizer(stop_words='english')
    cv_fit = cv.fit_transform(data_filtered)
    data_frame = {'Name': cv.get_feature_names(), 'Freq': cv_fit.toarray().sum(axis=0)}
    data_graph = pd.DataFrame(data_frame).sort_values(by=['Freq'], ascending=False)[0:total_data]
    objects = data_graph['Name'].values.tolist()
    y_pos = np.arange(len(data_graph['Name']))
    frequency = data_graph['Freq'].values.tolist()
    ax.barh(y_pos, frequency, align='center', color=bar_color, ecolor='black', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(objects)
    ax.invert_yaxis()
    ax.set_xlabel('Frequency')
    ax.set_title(chart_title)
    plt.show()

file = pd.read_csv('../input/ign.csv')
draw_plot(file, 0, 10.1, 20, 'black', 'What are the popular words?')
code
2029228/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def draw_plot(data, start_range, end_range, total_data, bar_color, chart_title):
    plt.rcdefaults()
    fig, ax = plt.subplots()
    data_filtered = data['title'][(data['score'] > start_range) & (data['score'] < end_range)].drop_duplicates()
    cv = CountVectorizer(stop_words='english')
    cv_fit = cv.fit_transform(data_filtered)
    data_frame = {'Name': cv.get_feature_names(), 'Freq': cv_fit.toarray().sum(axis=0)}
    data_graph = pd.DataFrame(data_frame).sort_values(by=['Freq'], ascending=False)[0:total_data]
    objects = data_graph['Name'].values.tolist()
    y_pos = np.arange(len(data_graph['Name']))
    frequency = data_graph['Freq'].values.tolist()
    ax.barh(y_pos, frequency, align='center', color=bar_color, ecolor='black', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(objects)
    ax.invert_yaxis()
    ax.set_xlabel('Frequency')
    ax.set_title(chart_title)
    plt.show()

file = pd.read_csv('../input/ign.csv')
draw_plot(file, 9.4, 10.1, 20, 'blue', 'What made the masterpieces?')
code
2029228/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def draw_plot(data, start_range, end_range, total_data, bar_color, chart_title):
    plt.rcdefaults()
    fig, ax = plt.subplots()
    data_filtered = data['title'][(data['score'] > start_range) & (data['score'] < end_range)].drop_duplicates()
    cv = CountVectorizer(stop_words='english')
    cv_fit = cv.fit_transform(data_filtered)
    data_frame = {'Name': cv.get_feature_names(), 'Freq': cv_fit.toarray().sum(axis=0)}
    data_graph = pd.DataFrame(data_frame).sort_values(by=['Freq'], ascending=False)[0:total_data]
    objects = data_graph['Name'].values.tolist()
    y_pos = np.arange(len(data_graph['Name']))
    frequency = data_graph['Freq'].values.tolist()
    ax.barh(y_pos, frequency, align='center', color=bar_color, ecolor='black', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(objects)
    ax.invert_yaxis()
    ax.set_xlabel('Frequency')
    ax.set_title(chart_title)
    plt.show()

file = pd.read_csv('../input/ign.csv')
draw_plot(file, 5.9, 9.5, 20, 'green', 'What is the Okay and above?')
code
2029228/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def draw_plot(data, start_range, end_range, total_data, bar_color, chart_title):
    plt.rcdefaults()
    fig, ax = plt.subplots()
    data_filtered = data['title'][(data['score'] > start_range) & (data['score'] < end_range)].drop_duplicates()
    cv = CountVectorizer(stop_words='english')
    cv_fit = cv.fit_transform(data_filtered)
    data_frame = {'Name': cv.get_feature_names(), 'Freq': cv_fit.toarray().sum(axis=0)}
    data_graph = pd.DataFrame(data_frame).sort_values(by=['Freq'], ascending=False)[0:total_data]
    objects = data_graph['Name'].values.tolist()
    y_pos = np.arange(len(data_graph['Name']))
    frequency = data_graph['Freq'].values.tolist()
    ax.barh(y_pos, frequency, align='center', color=bar_color, ecolor='black', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(objects)
    ax.invert_yaxis()
    ax.set_xlabel('Frequency')
    ax.set_title(chart_title)
    plt.show()

file = pd.read_csv('../input/ign.csv')
file.head()
code
2029228/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def draw_plot(data, start_range, end_range, total_data, bar_color, chart_title):
    plt.rcdefaults()
    fig, ax = plt.subplots()
    data_filtered = data['title'][(data['score'] > start_range) & (data['score'] < end_range)].drop_duplicates()
    cv = CountVectorizer(stop_words='english')
    cv_fit = cv.fit_transform(data_filtered)
    data_frame = {'Name': cv.get_feature_names(), 'Freq': cv_fit.toarray().sum(axis=0)}
    data_graph = pd.DataFrame(data_frame).sort_values(by=['Freq'], ascending=False)[0:total_data]
    objects = data_graph['Name'].values.tolist()
    y_pos = np.arange(len(data_graph['Name']))
    frequency = data_graph['Freq'].values.tolist()
    ax.barh(y_pos, frequency, align='center', color=bar_color, ecolor='black', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(objects)
    ax.invert_yaxis()
    ax.set_xlabel('Frequency')
    ax.set_title(chart_title)
    plt.show()

file = pd.read_csv('../input/ign.csv')
draw_plot(file, 0, 6.0, 20, 'red', 'The Worst')
code
129037105/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
code
129037105/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
code
129037105/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
code
129037105/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.describe()
code
129037105/cell_55
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # core library for numeric and scientific computing (linear algebra)
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
col = ['Age', 'Vintage', 'Avg_Account_Balance']
col_to_transform = ['Age', 'Vintage', 'Avg_Account_Balance']
df[col_to_transform] = df[col_to_transform].apply(lambda x: np.log(x))
# visualize the transformed data using histograms
fig, axes = plt.subplots(nrows=1, ncols=len(col_to_transform), figsize=(15, 5))
for i, col in enumerate(col_to_transform):
    axes[i].hist(df[col])
    axes[i].set_xlabel(f'log({col})')
    axes[i].set_ylabel('Frequency')
plt.show()
plt.figure(figsize=(10, 8))
sns.countplot(x='Gender', hue='Credit_Product', data=df)
code
129037105/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
code
129037105/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
plt.figure(figsize=(30, 10))
sns.boxplot(df['Avg_Account_Balance'])
code
129037105/cell_52
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # core library for numeric and scientific computing (linear algebra)
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
col = ['Age', 'Vintage', 'Avg_Account_Balance']
col_to_transform = ['Age', 'Vintage', 'Avg_Account_Balance']
df[col_to_transform] = df[col_to_transform].apply(lambda x: np.log(x))
# visualize the transformed data using histograms
fig, axes = plt.subplots(nrows=1, ncols=len(col_to_transform), figsize=(15, 5))
for i, col in enumerate(col_to_transform):
    axes[i].hist(df[col])
    axes[i].set_xlabel(f'log({col})')
    axes[i].set_ylabel('Frequency')
plt.show()
plt.figure(figsize=(10, 8))
sns.countplot(x='Gender', hue='Is_Active', data=df)
code
129037105/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129037105/cell_45
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
col = ['Age', 'Vintage', 'Avg_Account_Balance']
df[col].hist(bins=50, figsize=(20, 15))
plt.show()
code
129037105/cell_49
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # core library for numeric and scientific computing (linear algebra)
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
col = ['Age', 'Vintage', 'Avg_Account_Balance']
col_to_transform = ['Age', 'Vintage', 'Avg_Account_Balance']
df[col_to_transform] = df[col_to_transform].apply(lambda x: np.log(x))
# visualize the transformed data using histograms
fig, axes = plt.subplots(nrows=1, ncols=len(col_to_transform), figsize=(15, 5))
for i, col in enumerate(col_to_transform):
    axes[i].hist(df[col])
    axes[i].set_xlabel(f'log({col})')
    axes[i].set_ylabel('Frequency')
plt.show()
code
129037105/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.info()
code
129037105/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
code
129037105/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
code
129037105/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
sns.pairplot(df)
code
129037105/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
df['Is_Lead'].value_counts()
code
129037105/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
code
129037105/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.head()
code
129037105/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # core library for data manipulation and analysis; CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/credit-card-buyers/train data credit card.csv')
df.size
df.shape
df.drop('ID', axis=1, inplace=True)
df.nunique()
df.isnull().sum()
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
plt.figure(figsize=(5, 5))
df['Is_Lead'].value_counts().plot(kind='pie', autopct='%1.1f%%')
plt.title('Is_Lead', size=20)
code
33107227/cell_21
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
print(scores)
mean_score = scores.mean()
print(mean_score)
code
33107227/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
code
33107227/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_version'].value_counts()
code
33107227/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
code
33107227/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df.head()
code
33107227/cell_30
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
knn_grid.best_params_
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(knn, knn_params, scoring='accuracy', cv=5)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
code
33107227/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['device_brand'].value_counts()
code
33107227/cell_29
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
knn_grid.best_params_
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(knn, knn_params, scoring='accuracy', cv=5)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
code
33107227/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
knn_grid.best_params_
code
33107227/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.head()
code
33107227/cell_19
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
# X_train, X_valid, y_train, y_valid come from the train_test_split run in an earlier cell of this notebook
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
pd.DataFrame(confusion_matrix(y_valid, y_pred), index=['True_' + str(i + 1) for i in range(6)], columns=['Pred' + str(i + 1) for i in range(6)])
code
33107227/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33107227/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_vendor'].value_counts()
code
33107227/cell_18
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier

# X_train, X_valid, y_train, y_valid come from the train_test_split run in an earlier cell of this notebook
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
confusion_matrix(y_valid, y_pred)
code
33107227/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_name'].value_counts()
code
33107227/cell_15
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier

# X_train and y_train come from the train_test_split run in an earlier cell of this notebook
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
code
33107227/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv') vodafone_subset_6.head(10)
code
33107227/cell_17
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier

# X_train, X_valid, y_train, y_valid come from the train_test_split run in an earlier cell of this notebook
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
code
33107227/cell_24
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
code
33107227/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
code
33107227/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['device_type_rus'].value_counts()
code
33107227/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
X = df_1.drop('target', axis=1)
y = df_1['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
model_knn.fit(X_train, y_train)
y_pred = model_knn.predict(X_valid)
accuracy_score(y_pred, y_valid)
kf = KFold(n_splits=5, shuffle=True, random_state=22)
model_knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(model_knn, X, y, cv=kf, scoring='accuracy')
mean_score = scores.mean()
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(model_knn, knn_params, scoring='accuracy', cv=kf)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
knn_grid.best_params_
knn_params = {'n_neighbors': np.arange(1, 51)}
knn_grid = GridSearchCV(knn, knn_params, scoring='accuracy', cv=5)
knn_grid.fit(X_train, y_train)
code
33107227/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
code
33107227/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df.info()
code
128015258/cell_9
[ "text_plain_output_1.png" ]
prompt2 = "Task Description:\nIn this task, your goal is to convert a given sentence grounded in the input table schema into a question whose answer can be the given sentence. You should use the table schema provided to generate the question.\n\nNow complete the following:\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput:\n" len(prompt2)
code
128015258/cell_2
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, OPTForCausalLM

model = OPTForCausalLM.from_pretrained('facebook/opt-350m')
tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')
prompt = 'Hey, are you conscious? Can you talk to me?'
code
128015258/cell_11
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
prompt2 = "Task Description:\nIn this task, your goal is to convert a given sentence grounded in the input table schema into a question whose answer can be the given sentence. You should use the table schema provided to generate the question.\n\nNow complete the following:\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput:\n" print(generate(prompt2))
code
128015258/cell_1
[ "text_plain_output_1.png" ]
pip install transformers
code
128015258/cell_7
[ "text_plain_output_1.png" ]
prompt1 = "Task Description:\nIn this task, your goal is to convert a given sentence grounded in the input table schema into a question whose answer can be the given sentence. You should use the table schema provided to generate the question.\n\nExamples:\n\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt was born in Los Angeles where he currently resides.\nOutput: Where was Chris Pratt born and where does he currently reside?\n\nTable Schema: 'title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established', 'Disestablished'\nInput: Kara-Khanid Khanate had many people that were Arabic.\nOutput: Did Kara-Khanid Khanate have many people who were Arabic?\n\nTable Schema: 'Author', 'Publication date', 'Genre', 'Publisher', 'Pages', 'ISBN'\nInput: J.K. Rowling's Harry Potter and the Philosopher's Stone was published by Bloomsbury in 1997, and it has 223 pages.\nOutput: What is the author, publication date, publisher, and number of pages of Harry Potter and the Philosopher's Stone?\n\nNow complete the following:\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput: \n" len(prompt1)
code
128015258/cell_10
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
prompt1 = "Task Description:\nIn this task, your goal is to convert a given sentence grounded in the input table schema into a question whose answer can be the given sentence. You should use the table schema provided to generate the question.\n\nExamples:\n\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt was born in Los Angeles where he currently resides.\nOutput: Where was Chris Pratt born and where does he currently reside?\n\nTable Schema: 'title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established', 'Disestablished'\nInput: Kara-Khanid Khanate had many people that were Arabic.\nOutput: Did Kara-Khanid Khanate have many people who were Arabic?\n\nTable Schema: 'Author', 'Publication date', 'Genre', 'Publisher', 'Pages', 'ISBN'\nInput: J.K. Rowling's Harry Potter and the Philosopher's Stone was published by Bloomsbury in 1997, and it has 223 pages.\nOutput: What is the author, publication date, publisher, and number of pages of Harry Potter and the Philosopher's Stone?\n\nNow complete the following:\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput: \n" print(generate(prompt1))
code
128015258/cell_5
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, OPTForCausalLM

model = OPTForCausalLM.from_pretrained('facebook/opt-350m')
tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')
prompt = 'Hey, are you conscious? Can you talk to me?'
generate(prompt)
code
74058313/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.fillna({'reviews_per_month': 0}, inplace=True)
airbnb['price'] = airbnb['price'].replace('[$,]', '', regex=True).astype(float)
airbnb = airbnb.loc[airbnb['price'] > 0]
airbnb.host_is_superhost.fillna('f', inplace=True)
airbnb['host_is_superhost'] = airbnb['host_is_superhost'].replace({'f': 0, 't': 1})
airbnb.host_identity_verified.fillna('f', inplace=True)
airbnb['host_identity_verified'] = airbnb['host_identity_verified'].replace({'f': 0, 't': 1})
airbnb['available'] = airbnb['available'].replace({'f': 0, 't': 1})
airbnb['instant_bookable'] = airbnb['instant_bookable'].replace({'f': 0, 't': 1})
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 30000)]

p = airbnb.price.value_counts().sum()
print('Total records up to 30000:')
print(p)
print('-----------------------------')
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
print(l1['price'].value_counts().sum() / p * 100)
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 15000)]
print(l2['price'].value_counts().sum() / p * 100)
l3 = airbnb[airbnb['price'] > 15000]
print(l3['price'].value_counts().sum() / p * 100)

min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
print('Minimum price per listing is %d$.' % min_price)
print('Maximum price per listing is %d$' % max_price)
print('Average price per listing is %d$.' % mean_price)

airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 15000)]
print('Total records up to 15000:')
p = airbnb.price.value_counts().sum()
print(p)
print('-----------------------------')
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
print('Minimum price per listing is %d$.' % min_price)
print('Maximum price per listing is %d$' % max_price)
print('Average price per listing is %d$.' % mean_price)
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 10000)]
l3 = airbnb[airbnb['price'] > 10000]
code
74058313/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.info()
print(airbnb.isna().sum())
code
74058313/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.fillna({'reviews_per_month': 0}, inplace=True)
airbnb['price'] = airbnb['price'].replace('[$,]', '', regex=True).astype(float)
airbnb = airbnb.loc[airbnb['price'] > 0]
airbnb.host_is_superhost.fillna('f', inplace=True)
airbnb['host_is_superhost'] = airbnb['host_is_superhost'].replace({'f': 0, 't': 1})
airbnb.host_identity_verified.fillna('f', inplace=True)
airbnb['host_identity_verified'] = airbnb['host_identity_verified'].replace({'f': 0, 't': 1})
airbnb['available'] = airbnb['available'].replace({'f': 0, 't': 1})
airbnb['instant_bookable'] = airbnb['instant_bookable'].replace({'f': 0, 't': 1})
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 30000)]
p = airbnb.price.value_counts().sum()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 15000)]
l3 = airbnb[airbnb['price'] > 15000]
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 15000)]
p = airbnb.price.value_counts().sum()
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 10000)]
l3 = airbnb[airbnb['price'] > 10000]
mean_price_for_listing = airbnb.groupby('id').mean()['price']
plt.xticks(np.arange(800, 15000, step=1000))
f, ax = plt.subplots(figsize=(8, 3))
plt.subplot(2, 3, 1)  # subplot needs the plt. prefix with these imports
sns.boxplot(y=l1['price'])
plt.subplot(2, 3, 2)
sns.boxplot(y=l2['price'])
plt.subplot(2, 3, 3)
sns.boxplot(y=l3['price'])
plt.tight_layout()
plt.draw()
plt.show()
code
74058313/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import *

warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74058313/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.fillna({'reviews_per_month': 0}, inplace=True)
airbnb['price'] = airbnb['price'].replace('[$,]', '', regex=True).astype(float)
airbnb = airbnb.loc[airbnb['price'] > 0]
airbnb.host_is_superhost.fillna('f', inplace=True)
airbnb['host_is_superhost'] = airbnb['host_is_superhost'].replace({'f': 0, 't': 1})
airbnb.host_identity_verified.fillna('f', inplace=True)
airbnb['host_identity_verified'] = airbnb['host_identity_verified'].replace({'f': 0, 't': 1})
airbnb['available'] = airbnb['available'].replace({'f': 0, 't': 1})
airbnb['instant_bookable'] = airbnb['instant_bookable'].replace({'f': 0, 't': 1})
print(airbnb.isna().sum())
code
74058313/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.fillna({'reviews_per_month': 0}, inplace=True)
airbnb['price'] = airbnb['price'].replace('[$,]', '', regex=True).astype(float)
airbnb = airbnb.loc[airbnb['price'] > 0]
airbnb.host_is_superhost.fillna('f', inplace=True)
airbnb['host_is_superhost'] = airbnb['host_is_superhost'].replace({'f': 0, 't': 1})
airbnb.host_identity_verified.fillna('f', inplace=True)
airbnb['host_identity_verified'] = airbnb['host_identity_verified'].replace({'f': 0, 't': 1})
airbnb['available'] = airbnb['available'].replace({'f': 0, 't': 1})
airbnb['instant_bookable'] = airbnb['instant_bookable'].replace({'f': 0, 't': 1})
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 30000)]
p = airbnb.price.value_counts().sum()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 15000)]
l3 = airbnb[airbnb['price'] > 15000]
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 15000)]
p = airbnb.price.value_counts().sum()
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 10000)]
l3 = airbnb[airbnb['price'] > 10000]
mean_price_for_listing = airbnb.groupby('id').mean()['price']
plt.figure(figsize=(20, 5))
plt.hist(mean_price_for_listing, bins=20)
plt.xticks(np.arange(800, 15000, step=1000))
plt.ylabel('Number of listings')
plt.xlabel('Price, $')
plt.title('Number of listings depending on price')
plt.savefig('Price distribution.png')
plt.show()
code
74058313/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

listings = pd.read_csv('/kaggle/input/airbnb-buenosaires/listings.csv')
calendar = pd.read_csv('/kaggle/input/airbnb-buenosaires/calendar.csv')
listings = listings.loc[:, ['id', 'host_is_superhost', 'host_identity_verified', 'neighbourhood_cleansed',
                            'room_type', 'price', 'minimum_nights', 'availability_365', 'number_of_reviews',
                            'instant_bookable', 'accommodates', 'bathrooms_text', 'beds',
                            'calculated_host_listings_count', 'reviews_per_month', 'price',
                            'minimum_nights', 'maximum_nights']]
airbnb = listings.merge(calendar, how='inner', left_on='id', right_on='listing_id').sample(frac=0.1)
airbnb.drop(columns=['price_x', 'minimum_nights_x', 'maximum_nights_x', 'listing_id', 'adjusted_price'], inplace=True)
airbnb.rename(columns={'price_y': 'price', 'minimum_nights_y': 'minimum_nights', 'maximum_nights_y': 'maximum_nights'}, inplace=True)
airbnb.fillna({'reviews_per_month': 0}, inplace=True)
airbnb['price'] = airbnb['price'].replace('[$,]', '', regex=True).astype(float)
airbnb = airbnb.loc[airbnb['price'] > 0]
airbnb.host_is_superhost.fillna('f', inplace=True)
airbnb['host_is_superhost'] = airbnb['host_is_superhost'].replace({'f': 0, 't': 1})
airbnb.host_identity_verified.fillna('f', inplace=True)
airbnb['host_identity_verified'] = airbnb['host_identity_verified'].replace({'f': 0, 't': 1})
airbnb['available'] = airbnb['available'].replace({'f': 0, 't': 1})
airbnb['instant_bookable'] = airbnb['instant_bookable'].replace({'f': 0, 't': 1})
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 30000)]
p = airbnb.price.value_counts().sum()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 15000)]
l3 = airbnb[airbnb['price'] > 15000]
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
airbnb = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 15000)]
p = airbnb.price.value_counts().sum()
min_price = airbnb['price'].min()
max_price = airbnb['price'].max()
mean_price = airbnb['price'].mean()
l1 = airbnb[(airbnb['price'] > 900) & (airbnb['price'] <= 5000)]
l2 = airbnb[(airbnb['price'] > 5000) & (airbnb['price'] <= 10000)]
l3 = airbnb[airbnb['price'] > 10000]
mean_price_for_listing = airbnb.groupby('id').mean()['price']
plt.xticks(np.arange(800, 15000, step=1000))
print(airbnb.bathrooms_text.unique())
airbnb['shared_bathrooms'] = airbnb['bathrooms_text'].where(airbnb['bathrooms_text'].str.contains('shared') == True)
airbnb.shared_bathrooms.fillna(0, inplace=True)
# note: the bracketed patterns below are regex character classes, so they strip any
# of the listed characters rather than whole words; the digits are left behind
airbnb['shared_bathrooms'] = airbnb['shared_bathrooms'].replace('[ shared bath| shared baths]', '', regex=True).astype(float)
airbnb['private_bathrooms'] = airbnb['bathrooms_text'].where(airbnb['bathrooms_text'].str.contains('shared') == False)
airbnb.private_bathrooms.fillna(0, inplace=True)
airbnb['private_bathrooms'] = airbnb['private_bathrooms'].replace('[ private bath| bath| baths,]', '', regex=True)
airbnb['private_bathrooms'] = airbnb['private_bathrooms'].replace('[Plf-|Hlf-|Sdlf-]', '', regex=True)
airbnb['private_bathrooms'] = airbnb['private_bathrooms'].replace('', 0, regex=True).astype(float)
print(airbnb.private_bathrooms.unique())
airbnb.drop(columns=['bathrooms_text'], inplace=True)
print(airbnb.isna().sum())
code
106214297/cell_20
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
num_pipeline = Pipeline([('imputer', Imputer), ('scaler', ss)])
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred = full_pipeline.fit_transform(drop_train_data)
data = pd.DataFrame(data_pred)

sgd = SGDClassifier()
RandForest = RandomForestClassifier()
# train/test split restored from the earlier cell so this cell runs standalone
x_train, x_test, y_train, y_test = train_test_split(data_pred, y, test_size=0.2)
param_sgd = {'max_iter': [400, 600, 1000, 1500, 2000], 'loss': ['hinge', 'modified_huber'], 'penalty': ['l2', 'l1', 'elasticnet']}
grid_search_sgd = GridSearchCV(sgd, param_sgd)
grid_search_sgd.fit(x_train, y_train)
final_sgd = grid_search_sgd.best_estimator_
final_sgd.fit(x_train, y_train)
y_pred_sgd = final_sgd.predict(x_test)
accuracy_score(y_test, y_pred_sgd)

test_data = pd.read_csv('../input/titanic/test.csv')
drop_test_data = test_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
drop_test_data['Family'] = drop_test_data['SibSp'] + drop_test_data['Parch']
drop_test_data
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred_test = full_pipeline.fit_transform(drop_test_data)
data_pred_test.shape
final_sgd.predict(data_pred_test)
code
106214297/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
code
106214297/cell_19
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
num_pipeline = Pipeline([('imputer', Imputer), ('scaler', ss)])
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred = full_pipeline.fit_transform(drop_train_data)
data = pd.DataFrame(data_pred)

test_data = pd.read_csv('../input/titanic/test.csv')
drop_test_data = test_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
drop_test_data['Family'] = drop_test_data['SibSp'] + drop_test_data['Parch']
drop_test_data
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred_test = full_pipeline.fit_transform(drop_test_data)
data_pred_test.shape
code
106214297/cell_18
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
num_pipeline = Pipeline([('imputer', Imputer), ('scaler', ss)])
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred = full_pipeline.fit_transform(drop_train_data)
data = pd.DataFrame(data_pred)

test_data = pd.read_csv('../input/titanic/test.csv')
drop_test_data = test_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
drop_test_data['Family'] = drop_test_data['SibSp'] + drop_test_data['Parch']
drop_test_data
code
106214297/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score

# hard voting: SGDClassifier with the default hinge loss has no predict_proba, so soft voting is not available
voting = VotingClassifier(estimators=[('sgd', SGDClassifier()), ('randForest', RandomForestClassifier())], voting='hard')
# x_train, y_train, x_test, y_test come from the train_test_split in cell_10
voting.fit(x_train, y_train)
y_pred_voting = voting.predict(x_test)
accuracy_score(y_test, y_pred_voting)
code
106214297/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
code
106214297/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split

sgd = SGDClassifier()
RandForest = RandomForestClassifier()
param_sgd = {'max_iter': [400, 600, 1000, 1500, 2000], 'loss': ['hinge', 'modified_huber'], 'penalty': ['l2', 'l1', 'elasticnet']}
grid_search_sgd = GridSearchCV(sgd, param_sgd)
# x_train, y_train, x_test, y_test come from the train_test_split in cell_10
grid_search_sgd.fit(x_train, y_train)
final_sgd = grid_search_sgd.best_estimator_
final_sgd.fit(x_train, y_train)
y_pred_sgd = final_sgd.predict(x_test)
accuracy_score(y_test, y_pred_sgd)
code
106214297/cell_10
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
num_pipeline = Pipeline([('imputer', Imputer), ('scaler', ss)])
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred = full_pipeline.fit_transform(drop_train_data)
data = pd.DataFrame(data_pred)

sgd = SGDClassifier()
RandForest = RandomForestClassifier()
x_train, x_test, y_train, y_test = train_test_split(data_pred, y, test_size=0.2)
param_rand = {'n_estimators': [5, 10, 20], 'max_depth': [1, 5, 8, 10]}
grid_search_rand = GridSearchCV(RandForest, param_rand, n_jobs=-1)
grid_search_rand.fit(x_train, y_train)
best_param_tree = grid_search_rand.best_estimator_
best_param_tree
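# Optional inspection of the fitted search; this is an addition for illustration,
# not part of the original notebook (the attribute names are scikit-learn's own API):
print(grid_search_rand.best_params_)
print(grid_search_rand.best_score_)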
code
106214297/cell_12
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
num_pipeline = Pipeline([('imputer', Imputer), ('scaler', ss)])
num_attrib = ['Fare', 'Age']
txt_attrib = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attrib), ('txt_trans', OneHot, txt_attrib)])
data_pred = full_pipeline.fit_transform(drop_train_data)
data = pd.DataFrame(data_pred)

sgd = SGDClassifier()
RandForest = RandomForestClassifier()
x_train, x_test, y_train, y_test = train_test_split(data_pred, y, test_size=0.2)
param_rand = {'n_estimators': [5, 10, 20], 'max_depth': [1, 5, 8, 10]}
grid_search_rand = GridSearchCV(RandForest, param_rand, n_jobs=-1)
grid_search_rand.fit(x_train, y_train)
best_param_tree = grid_search_rand.best_estimator_
best_param_tree
final_rand = grid_search_rand.best_estimator_
final_rand.fit(x_train, y_train)
y_pred_rand = final_rand.predict(x_test)
accuracy_score(y_test, y_pred_rand)
code
106214297/cell_5
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score

ss = StandardScaler()
OneHot = OneHotEncoder()
Imputer = SimpleImputer()
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.keys()
drop_train_data = train_data.drop(['Name', 'PassengerId', 'Cabin', 'Ticket'], axis=1).dropna(axis=0)
train_data['Family'] = train_data['SibSp'] + train_data['Parch']
train_data.corr()
y = drop_train_data.pop('Survived')
OneHot.fit_transform(drop_train_data[['Embarked']])  # the encoder expects a 2D input
code
2007984/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
code
2007984/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
code
2007984/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
code
2007984/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
code
2007984/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

x_train.reshape  # no-op left over from the notebook: the method is referenced, not called
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])

gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
model.optimizer.lr = 0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, Y_train, batch_size=64)
history = model.fit_generator(batches, batches.n, nb_epoch=1)
preds = model.predict_classes(x_test, verbose=0)
preds[0:5]
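# A sketch of turning the predictions into a Kaggle submission file; the
# 'ImageId'/'Label' column names are an assumption based on the Digit Recognizer
# competition format, and this step is not part of the original notebook:
submission = pd.DataFrame({'ImageId': np.arange(1, len(preds) + 1), 'Label': preds})
submission.to_csv('submission.csv', index=False)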
code
2007984/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

x_train.reshape  # no-op left over from the notebook: the method is referenced, not called
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])

gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
cache.history
code
2007984/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

x_train.reshape  # no-op left over from the notebook: the method is referenced, not called
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])

gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
model.optimizer.lr = 0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, Y_train, batch_size=64)
history = model.fit_generator(batches, batches.n, nb_epoch=1)
preds = model.predict_classes(x_test, verbose=0)
code
2007984/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
y_train.shape
code
2007984/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

x_train.reshape  # no-op left over from the notebook: the method is referenced, not called
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])

gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
code
2007984/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # iloc: positional slicing on a DataFrame
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
code