path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1) |
---|---|---|---|
73083438/cell_11 | [
"text_html_output_1.png"
] | from termcolor import colored
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
print('Number of numerical columns is:', colored(len(num_col), 'green'), '\nNumber of categorical columns is:', colored(len(cat_cols), 'green')) | code |
73083438/cell_7 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.info() | code |
73083438/cell_18 | [
"text_plain_output_1.png"
] | from termcolor import colored
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
list(test.columns) == list(features.columns)
test.isnull().sum()
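# Check that every categorical level in the test set also appears in the
# training set, so encoders fit on train will not meet unseen categories.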
lis = []
for i in features[cat_cols].columns:
test_vals = set(test[i].unique())
train_vals = set(features[i].unique())
lis.append(test_vals.issubset(train_vals))
print(colored(all(lis), 'green')) | code |
73083438/cell_28 | [
"text_plain_output_1.png"
] | from termcolor import colored
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
list(test.columns) == list(features.columns)
test.isnull().sum()
fig = plt.figure(figsize=(10, 5))
sns.barplot(y=train[cat_cols].nunique().values, x=train[cat_cols].nunique().index, color='blue', alpha=0.5)
plt.xticks(rotation=0)
plt.title('Number of categorical unique values', fontsize=16)
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
    for j in range(5):
        ax = fig.add_subplot(grid[i, j])
        order = list(train['cat' + str(n)].value_counts().index)
        sns.countplot(data=train, x='cat' + str(n), ax=ax, alpha=0.8, order=order, palette='viridis')
        ax.set_title('cat' + str(n), fontsize=14)
        ax.set_xlabel('')
        ax.set_ylabel('')
        n += 1
fig.suptitle('Train categorical features unique values count', fontsize=16, y=0.93)
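# Repeat the same 2x5 countplot grid for the test set to compare category distributions.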
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
for j in range(5):
ax = fig.add_subplot(grid[i, j])
order = list(test['cat' + str(n)].value_counts().index)
sns.countplot(data=test, x='cat' + str(n), ax=ax, alpha=0.8, order=order, palette='viridis')
ax.set_title('cat' + str(n), fontsize=14)
ax.set_xlabel('')
ax.set_ylabel('')
n += 1
fig.suptitle('Test categorical features unique values count', fontsize=16, y=0.93) | code |
73083438/cell_8 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum() | code |
73083438/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
list(test.columns) == list(features.columns)
test.info() | code |
73083438/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
list(test.columns) == list(features.columns)
test.isnull().sum() | code |
73083438/cell_31 | [
"text_plain_output_1.png"
] | from termcolor import colored
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
list(test.columns) == list(features.columns)
test.isnull().sum()
fig = plt.figure(figsize=(10, 5))
sns.barplot(y=train[cat_cols].nunique().values, x=train[cat_cols].nunique().index, color='blue', alpha=0.5)
plt.xticks(rotation=0)
plt.title('Number of categorical unique values', fontsize=16)
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
    for j in range(5):
        ax = fig.add_subplot(grid[i, j])
        order = list(train['cat' + str(n)].value_counts().index)
        sns.countplot(data=train, x='cat' + str(n), ax=ax, alpha=0.8, order=order, palette='viridis')
        ax.set_title('cat' + str(n), fontsize=14)
        ax.set_xlabel('')
        ax.set_ylabel('')
        n += 1
fig.suptitle('Train categorical features unique values count', fontsize=16, y=0.93)
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
    for j in range(5):
        ax = fig.add_subplot(grid[i, j])
        order = list(test['cat' + str(n)].value_counts().index)
        sns.countplot(data=test, x='cat' + str(n), ax=ax, alpha=0.8, order=order, palette='viridis')
        ax.set_title('cat' + str(n), fontsize=14)
        ax.set_xlabel('')
        ax.set_ylabel('')
        n += 1
fig.suptitle('Test categorical features unique values count', fontsize=16, y=0.93)
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
for j in range(5):
ax = fig.add_subplot(grid[i, j])
sns.barplot(data=train, y='target', x='cat' + str(n), ax=ax, alpha=0.6, ci=95, color='darkblue', dodge=False)
ax.set_title('cat' + str(n), fontsize=14)
ax.set_xlabel('')
ax.set_ylabel('')
n += 1
fig.suptitle('Distribution of categorical features unique values and target', fontsize=16, y=0.93) | code |
73083438/cell_24 | [
"text_html_output_1.png"
] | from termcolor import colored
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
fig = plt.figure(figsize=(10, 5))
sns.barplot(y=train[cat_cols].nunique().values, x=train[cat_cols].nunique().index, color='blue', alpha=0.5)
plt.xticks(rotation=0)
plt.title('Number of categorical unique values', fontsize=16) | code |
73083438/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
list(test.columns) == list(features.columns)
test.describe() | code |
73083438/cell_27 | [
"text_plain_output_1.png"
] | from termcolor import colored
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.isnull().sum()
features = train.drop(['target'], axis=1)
num_col = list(train.select_dtypes(include='float64').columns)
cat_cols = list(train.select_dtypes(include='object').columns)
num_col.remove('target')
fig = plt.figure(figsize=(10, 5))
sns.barplot(y=train[cat_cols].nunique().values, x=train[cat_cols].nunique().index, color='blue', alpha=0.5)
plt.xticks(rotation=0)
plt.title('Number of categorical unique values', fontsize=16)
fig = plt.figure(figsize=(26, 10))
grid = gridspec.GridSpec(2, 5, figure=fig, hspace=0.2, wspace=0.2)
n = 0
for i in range(2):
for j in range(5):
ax = fig.add_subplot(grid[i, j])
order = list(train['cat' + str(n)].value_counts().index)
sns.countplot(data=train, x='cat' + str(n), ax=ax, alpha=0.8, order=order, palette='viridis')
ax.set_title('cat' + str(n), fontsize=14)
ax.set_xlabel('')
ax.set_ylabel('')
n += 1
fig.suptitle('Train categorical features unique values count', fontsize=16, y=0.93) | code |
73083438/cell_5 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head() | code |
34133665/cell_6 | [
"text_plain_output_1.png"
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34133665/cell_17 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMRegressor
from math import sqrt
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv', index_col=[0])
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv', index_col=[0])
sample = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv', index_col=[0])
train_clean = train.drop(columns=['MiscFeature', 'Fence', 'PoolQC', 'FireplaceQu', 'Alley'])
X = train_clean.drop(columns=['SalePrice'])
y = train_clean[['SalePrice']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
num_feat = X_train.select_dtypes(include='number').columns.to_list()
cat_feat = X_train.select_dtypes(exclude='number').columns.to_list()
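# Numeric features: mean-impute then standardize; categorical features:
# most-frequent-impute then one-hot encode, ignoring categories unseen at fit time.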
num_pipe = Pipeline([('imputer', SimpleImputer(strategy='mean')), ('scaler', StandardScaler())])
cat_pipe = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', OneHotEncoder(handle_unknown='ignore'))])
ct = ColumnTransformer(remainder='drop', transformers=[('numerical', num_pipe, num_feat), ('categorical', cat_pipe, cat_feat)])
model = Pipeline([('transformer', ct), ('predictor', LGBMRegressor())])
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
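# Report RMSLE, the metric used by the House Prices competition: the square
# root of the mean squared difference between log1p(pred) and log1p(true).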
print('In sample error: ', sqrt(mean_squared_log_error(y_train, y_pred_train)))
print('Out sample error: ', sqrt(mean_squared_log_error(y_test, y_pred_test))) | code |
34133665/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv', index_col=[0])
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv', index_col=[0])
sample = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv', index_col=[0])
train_clean = train.drop(columns=['MiscFeature', 'Fence', 'PoolQC', 'FireplaceQu', 'Alley'])
X = train_clean.drop(columns=['SalePrice'])
y = train_clean[['SalePrice']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) | code |
74062774/cell_21 | [
"text_html_output_1.png"
] | from imblearn.over_sampling import SMOTE
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
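# Manually encode the remaining string columns as integer codes so the
# frame is fully numeric for scikit-learn.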
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
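# SMOTE balances the classes by synthesizing new minority-class rows,
# interpolating between each minority sample and its nearest minority neighbors.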
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
train_smote_Y.value_counts().plot(kind='bar').set_xlabel('Canceled') | code |
74062774/cell_4 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum() | code |
74062774/cell_23 | [
"text_html_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
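# 3 x 2 x 3 = 18 parameter combinations; with cv=2 this fits 36 forests.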
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_ | code |
74062774/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import recall_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import recall_score
print('Recall for RF on test data: ', recall_score(test_Y, pred)) | code |
74062774/cell_33 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import roc_curve
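# roc_curve on hard 0/1 predictions yields only three points; passing
# probabilities from predict_proba would trace a fuller curve.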
fpr, tpr, thresholds = roc_curve(test_Y, pred)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show() | code |
74062774/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
train_Y.value_counts().plot(kind='bar').set_xlabel('Canceled') | code |
74062774/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data | code |
74062774/cell_29 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import accuracy_score
print('Accuracy for RF on test data: ', accuracy_score(test_Y, pred)) | code |
74062774/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import confusion_matrix
CF = confusion_matrix(test_Y, pred)
CF | code |
74062774/cell_2 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data | code |
74062774/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
data.info() | code |
74062774/cell_32 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import roc_auc_score
print('ROC for RF on test data: ', roc_auc_score(test_Y, pred)) | code |
74062774/cell_28 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import classification_report
target_names = ['Not Cancel', 'Cancel']
print(classification_report(test_Y, pred, target_names=target_names)) | code |
74062774/cell_3 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.info() | code |
74062774/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
data | code |
74062774/cell_31 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import precision_score
print('Precision for RF on test data: ', precision_score(test_Y, pred)) | code |
74062774/cell_22 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y) | code |
74062774/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sklearn.model_selection as ms
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn')
pd.set_option('display.max_columns', None)
data = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
data
data.isna().sum()
data = data.drop(['agent', 'company', 'required_car_parking_spaces', 'reservation_status', 'reservation_status_date', 'country', 'name', 'email', 'phone-number', 'credit_card'], axis=1)
data
a_month = {'arrival_date_month': {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}}
data.replace(a_month, inplace=True)
meal = {'meal': {'Undefined': 0, 'SC': 1, 'BB': 2, 'HB': 3, 'FB': 4}}
data.replace(meal, inplace=True)
segment = {'market_segment': {'Aviation': 1, 'Complementary': 2, 'Corporate': 3, 'Direct': 4, 'Groups': 5, 'Offline TA/TO': 6, 'Online TA': 7}}
data.replace(segment, inplace=True)
distribution = {'distribution_channel': {'GDS': 1, 'Corporate': 2, 'Direct': 3, 'TA/TO': 4}}
data.replace(distribution, inplace=True)
deposit = {'deposit_type': {'Refundable': 1, 'Non Refund': 0, 'No Deposit': 2}}
data.replace(deposit, inplace=True)
customer = {'customer_type': {'Contract': 2, 'Group': 3, 'Transient-Party': 1, 'Transient': 0}}
data.replace(customer, inplace=True)
import sklearn.model_selection as ms
train, test = ms.train_test_split(data, test_size=0.2, random_state=42)
train_X = train.drop(labels='is_canceled', axis=1)
train_Y = train['is_canceled']
test_X = test.drop(labels='is_canceled', axis=1)
test_Y = test['is_canceled']
from imblearn.over_sampling import SMOTE
os = SMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)
train_smote_X, train_smote_Y = os.fit_resample(train_X, train_Y)
train_smote_X = pd.DataFrame(data=train_smote_X, columns=train_X.columns)
train_smote_Y = pd.DataFrame(data=train_smote_Y)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': (100, 1000, 2000), 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}
from sklearn.model_selection import GridSearchCV
CV_rf = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=1, cv=2, verbose=1, return_train_score=True)
CV_rf.fit(train_smote_X, train_smote_Y)
CV_rf.best_params_
pred = CV_rf.predict(test_X)
from sklearn.metrics import confusion_matrix
CF = confusion_matrix(test_Y, pred)
CF
sns.heatmap(CF, annot=True, fmt='d') | code |
129008932/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000 | code |
129008932/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
wii_average_sales = over10000[over10000['Platform'] == 'Wii']['Global_Sales'].mean()
other_platforms_average_sales = over10000[over10000['Platform'] != 'Wii']['Global_Sales'].mean()
if wii_average_sales > other_platforms_average_sales:
print('The average number of sales for the Nintendo Wii is higher than all the other platforms.')
else:
print('The average number of sales for the Nintendo Wii is lower than all the other platforms.') | code |
129008932/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
over10000['Publisher'].value_counts().index[0] | code |
129008932/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129008932/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
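# Express the top NA seller as a z-score: (max - mean) / std.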
top_selling_game_sales = over10000['NA_Sales'].max()
mean_sales = over10000['NA_Sales'].mean()
std_sales = over10000['NA_Sales'].std()
standard_deviations = (top_selling_game_sales - mean_sales) / std_sales
print("The top-selling game's sales for North America are", standard_deviations, 'standard deviations above the mean.') | code |
129008932/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
over10000['Platform'].value_counts().index[0] | code |
129008932/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
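# Keep games whose NA sales fall within 0.5 million copies of the median.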
na_median_sales = over10000['NA_Sales'].median()
ten_games_surrounding_median = over10000[over10000['NA_Sales'].between(na_median_sales - 0.5, na_median_sales + 0.5)][['Name', 'NA_Sales']]
ten_games_surrounding_median = ten_games_surrounding_median.sort_values('NA_Sales', ascending=False)
print('Ten games surrounding the median sales output for North American video game sales:')
print(ten_games_surrounding_median) | code |
129008932/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df | code |
129008932/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
top_3_publishers_total_sales = over10000.groupby('Publisher')['Global_Sales'].sum().nlargest(3)
platform_sales = over10000.groupby('Platform')['Global_Sales'].sum().sort_values(ascending=False)
print('Global sales by platform:')
print(platform_sales) | code |
129008932/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
na_median_sales = over10000['NA_Sales'].median()
print('The median for North American video game sales is:', na_median_sales) | code |
129008932/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
top_3_publishers_total_sales = over10000.groupby('Publisher')['Global_Sales'].sum().nlargest(3)
print('Top 3 publishers with the highest total sales:')
print(top_3_publishers_total_sales) | code |
129008932/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
over10000['Genre'].value_counts().index[0] | code |
129008932/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
over10000 = df[df['Global_Sales'] > 0.01]
over10000
over10000[['Name', 'Global_Sales']].sort_values('Global_Sales', ascending=False)[0:20] | code |
128027348/cell_13 | [
"text_plain_output_1.png"
] | from gensim.models import keyedvectors
import gensim
from gensim.models import keyedvectors
w2v = keyedvectors.load_word2vec_format('/kaggle/input/tencent/tencent-ailab-embedding-zh-d100-v0.2.0-s/tencent-ailab-embedding-zh-d100-v0.2.0-s.txt', binary=False)
w2v[['的', '在']] | code |
128027348/cell_4 | [
"text_plain_output_1.png"
] | import io
import pandas as pd
root_path = '/kaggle/input/test-train'
train_path = '/kaggle/input/test-train/train_clean.txt'
import pandas as pd
import io
with open('/kaggle/input/test-train/train_clean.txt', 'r') as f:
train_text = f.read()
train_data = pd.read_csv(io.StringIO(train_text), sep='\t', names=['label', 'comment'])
train_data['comment'] = train_data['comment'].astype(str)
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x).split()))
comments_len
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x)))
comments_len
train_data['comments_len'] = comments_len
from collections import Counter
words = []
for i in range(len(train_data)):
com = train_data['comment'][i].split()
words = words + com
len(words) | code |
128027348/cell_6 | [
"text_plain_output_1.png"
] | from collections import Counter
import io
import os
import pandas as pd
root_path = '/kaggle/input/test-train'
train_path = '/kaggle/input/test-train/train_clean.txt'
import pandas as pd
import io
with open('/kaggle/input/test-train/train_clean.txt', 'r') as f:
train_text = f.read()
train_data = pd.read_csv(io.StringIO(train_text), sep='\t', names=['label', 'comment'])
train_data['comment'] = train_data['comment'].astype(str)
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x).split()))
comments_len
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x)))
comments_len
train_data['comments_len'] = comments_len
from collections import Counter
words = []
for i in range(len(train_data)):
com = train_data['comment'][i].split()
words = words + com
len(words)
Freq = 30
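# Words occurring more than Freq times form the vocabulary; rarer words
# are dropped and will later map to the padding index.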
import os
with open(os.path.join('/kaggle/working/', 'word_freq.txt'), 'w', encoding='utf-8') as fout:
for word, freq in Counter(words).most_common():
if freq > Freq:
fout.write(word + '\n')
with open(os.path.join('/kaggle/working/', 'word_freq.txt'), encoding='utf-8') as fin:
vocab = [i.strip() for i in fin]
vocab = set(vocab)
word2idx = {i: index for index, i in enumerate(vocab)}
idx2word = {index: i for index, i in enumerate(vocab)}
vocab_size = len(vocab)
len(vocab) | code |
128027348/cell_2 | [
"text_plain_output_1.png"
] | import io
import pandas as pd
root_path = '/kaggle/input/test-train'
train_path = '/kaggle/input/test-train/train_clean.txt'
import pandas as pd
import io
with open('/kaggle/input/test-train/train_clean.txt', 'r') as f:
train_text = f.read()
train_data = pd.read_csv(io.StringIO(train_text), sep='\t', names=['label', 'comment'])
train_data['comment'] = train_data['comment'].astype(str)
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x).split()))
comments_len | code |
128027348/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | pad_id = 923
print(pad_id) | code |
128027348/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | print(len(data_input))
print(len(data_input[7])) | code |
128027348/cell_16 | [
"text_plain_output_1.png"
] | from collections import Counter
from gensim.models import keyedvectors
import io
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
root_path = '/kaggle/input/test-train'
train_path = '/kaggle/input/test-train/train_clean.txt'
import pandas as pd
import io
with open('/kaggle/input/test-train/train_clean.txt', 'r') as f:
train_text = f.read()
train_data = pd.read_csv(io.StringIO(train_text), sep='\t', names=['label', 'comment'])
train_data['comment'] = train_data['comment'].astype(str)
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x).split()))
comments_len
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x)))
comments_len
train_data['comments_len'] = comments_len
from collections import Counter
words = []
for i in range(len(train_data)):
com = train_data['comment'][i].split()
words = words + com
len(words)
Freq = 30
import os
with open(os.path.join('/kaggle/working/', 'word_freq.txt'), 'w', encoding='utf-8') as fout:
for word, freq in Counter(words).most_common():
if freq > Freq:
fout.write(word + '\n')
with open(os.path.join('/kaggle/working/', 'word_freq.txt'), encoding='utf-8') as fin:
vocab = [i.strip() for i in fin]
vocab = set(vocab)
word2idx = {i: index for index, i in enumerate(vocab)}
idx2word = {index: i for index, i in enumerate(vocab)}
vocab_size = len(vocab)
len(vocab)
pad_id = 923
sequence_length = 62
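# tokenizer(): map every comment to word indices, padding/truncating each sequence to sequence_length; out-of-vocabulary words fall back to pad_id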
def tokenizer():
inputs = []
sentence_char = [str(i).split() for i in train_data['comment']]
for index, i in enumerate(sentence_char):
temp = [word2idx.get(j, pad_id) for j in i]
if len(i) < sequence_length:
for _ in range(sequence_length - len(i)):
temp.append(pad_id)
else:
temp = temp[:sequence_length]
inputs.append(temp)
return inputs
data_input = tokenizer()
import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
Embedding_size = 100
Batch_Size = 32
Kernel = 3
Filter_num = 20
Epoch = 100
Dropout = 0.5
Learning_rate = 0.001
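# Dataset wrapper pairing the padded index sequences with their integer labels for DataLoader batching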
class TextCNNDataSet(Data.Dataset):
def __init__(self, data_inputs, data_targets):
self.inputs = torch.LongTensor(data_inputs)
self.label = torch.LongTensor(data_targets)
def __getitem__(self, index):
return (self.inputs[index], self.label[index])
def __len__(self):
return len(self.inputs)
text_cnn_dataset = TextCNNDataSet(data_input, list(train_data['label']))
train_size = int(len(data_input) * 0.8)
test_size = int(len(data_input) * 0.15)
val_size = len(data_input) - train_size - test_size
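# roughly 80/5/15 train/val/test split (val_size is whatever remains after the 80% and 15% slices)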
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(text_cnn_dataset, [train_size, val_size, test_size])
TrainDataLoader = Data.DataLoader(train_dataset, batch_size=Batch_Size, shuffle=True)
TestDataLoader = Data.DataLoader(test_dataset, batch_size=Batch_Size, shuffle=True)
import gensim
from gensim.models import keyedvectors
w2v = keyedvectors.load_word2vec_format('/kaggle/input/tencent/tencent-ailab-embedding-zh-d100-v0.2.0-s/tencent-ailab-embedding-zh-d100-v0.2.0-s.txt', binary=False)
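# word2vec: look up pretrained Tencent vectors for a batch of index sequences -> (batch, sequence_length, Embedding_size)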
def word2vec(x):
x2v = np.ones((len(x), x.shape[1], Embedding_size))
for i in range(len(x)):
try:
x2v[i] = w2v[[idx2word[j.item()] for j in x[i]]]
except Exception as e:
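            # OOV fallback: any sentence containing a word missing from w2v is replaced by random vectors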
x2v[i] = np.random.randn(62, 100)
return torch.tensor(x2v, dtype=torch.float32)
num_classes = 2
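# TextCNN: a single Conv2d over bigrams of embeddings, max-pooled over the sequence, followed by a linear classifier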
class TextCNN(nn.Module):
def __init__(self):
super(TextCNN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_size)
out_channel = Filter_num
self.conv = nn.Sequential(nn.Conv2d(1, out_channel, (2, Embedding_size)), nn.ReLU(), nn.MaxPool2d((sequence_length - 1, 1)))
self.dropout = nn.Dropout(Dropout)
        self.fc = nn.Linear(out_channel, num_classes)
def forward(self, X):
batch_size = X.shape[0]
X = self.embedding(X)
X = X.unsqueeze(1)
conved = self.conv(X)
conved = self.dropout(conved)
flatten = conved.view(batch_size, -1)
output = self.fc(flatten)
        return F.log_softmax(output, dim=1)  # log-probabilities over the two classes
vocab_size = 2000000
embedding_size = 100
model = TextCNN().to(device)
optimizer = optim.Adam(model.parameters(), lr=Learning_rate)
def binary_acc(pred, y):
"""
计算模型的准确率
:param pred: 预测值
:param y: 实际真实值
:return: 返回准确率
"""
correct = torch.eq(pred, y).float()
acc = correct.sum() / len(correct)
return acc.item()
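# one epoch over TrainDataLoader: forward pass, NLL loss, backprop; returns the mean per-batch accuracy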
def train():
avg_acc = []
model.train()
for index, (batch_x, batch_y) in enumerate(TrainDataLoader):
batch_x, batch_y = (batch_x.to(device), batch_y.to(device))
batch_x = batch_x.long()
pred = model(batch_x)
loss = F.nll_loss(pred, batch_y)
acc = binary_acc(torch.max(pred, dim=1)[1], batch_y)
avg_acc.append(acc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_acc = np.array(avg_acc).mean()
return avg_acc
model_train_acc, model_test_acc = ([], [])
for epoch in range(Epoch):
train_acc = train()
    print('epoch = {}, train accuracy = {}'.format(epoch + 1, train_acc))
model_train_acc.append(train_acc) | code |
128027348/cell_3 | [
"text_plain_output_1.png"
] | import io
import pandas as pd
root_path = '/kaggle/input/test-train'
train_path = '/kaggle/input/test-train/train_clean.txt'
import pandas as pd
import io
with open('/kaggle/input/test-train/train_clean.txt', 'r') as f:
train_text = f.read()
train_data = pd.read_csv(io.StringIO(train_text), sep='\t', names=['label', 'comment'])
train_data['comment'] = train_data['comment'].astype(str)
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x).split()))
comments_len
comments_len = train_data.iloc[:, 1].apply(lambda x: len(str(x)))
comments_len
train_data['comments_len'] = comments_len
train_data['comments_len'].describe(percentiles=[0.5, 0.95]) | code |
128027348/cell_14 | [
"text_plain_output_1.png"
] | from gensim.models import keyedvectors
import gensim
from gensim.models import keyedvectors
w2v = keyedvectors.load_word2vec_format('/kaggle/input/tencent/tencent-ailab-embedding-zh-d100-v0.2.0-s/tencent-ailab-embedding-zh-d100-v0.2.0-s.txt', binary=False)
print(len(w2v.key_to_index)) | code |
34139450/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df_train = pd.read_csv('titanic/titanic.csv')
df_train.head() | code |
73089201/cell_13 | [
"image_output_5.png",
"image_output_4.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pandas.plotting import autocorrelation_plot
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
    x = data[label1]
    y = data[label1]
    z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
    ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
    return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5')
import re, seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
df = data_clean.unstack().reset_index()
df.columns = ['X', 'Y', 'Z']
df['X'] = pd.Categorical(df['X'])
df['X'] = df['X'].cat.codes
x = np.array(df['X'])
y = np.array(df['Y'])
z = df['Z']
print(x.shape, y.shape, z.shape)
fig = plt.figure(figsize=(6, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
cmap = ListedColormap(sns.color_palette('husl', 256).as_hex())
sc = ax.scatter(x, y, z, s=40, c=x, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
plt.rcParams['figure.figsize'] = (5, 5)
import pandas as pd
import seaborn as sns
pd.plotting.radviz(data_clean, 'label') | code |
73089201/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import scipy.io as sp
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
pd.plotting.scatter_matrix(data_clean, figsize=(15, 15)) | code |
73089201/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import scipy.io as sp
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
sns.heatmap(datakorelasi, cmap='Blues', annot=True) | code |
73089201/cell_11 | [
"image_output_1.png"
] | from pandas.plotting import autocorrelation_plot
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
x = data[label1]
y = data[label1]
z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5') | code |
73089201/cell_1 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import scipy.io as sp
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
print(load['sampFreq'])
print(load['nS'])
print(load.dtype)
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
datadf.head() | code |
73089201/cell_7 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import scipy.io as sp
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
print(data_label_0.label.unique(), data_label_0.label.shape) | code |
73089201/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import scipy.io as sp
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
fig, ax = plt.subplots(1, 8, figsize=(20, 5))
ax[0].plot(data[label1], label=label1)
ax[0].legend()
pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
ax[1].legend()
ax[2].plot(data[label2], label=label2)
ax[2].legend()
pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
ax[3].legend()
ax[4].plot(data[label3], label=label3)
ax[4].legend()
pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
ax[5].legend()
ax[6].plot(data[label4], label=label4)
ax[6].legend()
pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
ax[7].legend()
return
def plot2_lag(data, label1, label2):
fig, ax = plt.subplots(1, 4, figsize=(20, 5))
ax[0].plot(data[label1], label=label1)
ax[0].legend()
pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
ax[1].legend()
ax[2].plot(data[label2], label=label2)
ax[2].legend()
pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
ax[3].legend()
return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5') | code |
73089201/cell_16 | [
"image_output_1.png"
] | from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pandas.plotting import autocorrelation_plot
from sklearn.decomposition import PCA, NMF
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
    x = data[label1]
    y = data[label1]
    z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
    ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
    return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5')
import re, seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
df = data_clean.unstack().reset_index()
df.columns = ['X', 'Y', 'Z']
df['X'] = pd.Categorical(df['X'])
df['X'] = df['X'].cat.codes
x = np.array(df['X'])
y = np.array(df['Y'])
z = df['Z']
print(x.shape, y.shape, z.shape)
fig = plt.figure(figsize=(6, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
cmap = ListedColormap(sns.color_palette('husl', 256).as_hex())
sc = ax.scatter(x, y, z, s=40, c=x, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
plt.rcParams['figure.figsize'] = (5, 5)
import pandas as pd
import seaborn as sns
pd.plotting.radviz(data_clean, 'label')
from sklearn.decomposition import PCA, NMF
from sklearn.preprocessing import StandardScaler
data_pca = data_clean.drop(['label'], axis=1)
st = StandardScaler()
data_std = st.fit_transform(data_pca)
tpca = PCA(n_components=6)
data_pca = pd.DataFrame(tpca.fit_transform(data_std))
data_pca.columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6']
data_pca
plot4_lag(data_pca, 'f1', 'f2', 'f3', 'f4')
plot2_lag(data_pca, 'f5', 'f6') | code |
73089201/cell_3 | [
"image_output_1.png"
] | import pandas as pd
import scipy.io as sp
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape)
data_asli.info() | code |
73089201/cell_17 | [
"image_output_1.png"
] | from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pandas.plotting import autocorrelation_plot
from sklearn.decomposition import PCA, NMF
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
    x = data[label1]
    y = data[label1]
    z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
    ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
    return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5')
import re, seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
df = data_clean.unstack().reset_index()
df.columns = ['X', 'Y', 'Z']
df['X'] = pd.Categorical(df['X'])
df['X'] = df['X'].cat.codes
x = np.array(df['X'])
y = np.array(df['Y'])
z = df['Z']
print(x.shape, y.shape, z.shape)
fig = plt.figure(figsize=(6, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
cmap = ListedColormap(sns.color_palette('husl', 256).as_hex())
sc = ax.scatter(x, y, z, s=40, c=x, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
plt.rcParams['figure.figsize'] = (5, 5)
import pandas as pd
import seaborn as sns
pd.plotting.radviz(data_clean, 'label')
from sklearn.decomposition import PCA, NMF
from sklearn.preprocessing import StandardScaler
data_pca = data_clean.drop(['label'], axis=1)
st = StandardScaler()
data_std = st.fit_transform(data_pca)
tpca = PCA(n_components=6)
data_pca = pd.DataFrame(tpca.fit_transform(data_std))
data_pca.columns = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6']
data_pca
pd.plotting.scatter_matrix(data_pca, figsize=(15, 15)) | code |
73089201/cell_14 | [
"image_output_1.png"
] | from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pandas.plotting import autocorrelation_plot
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
    x = data[label1]
    y = data[label1]
    z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
    ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
    return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5')
import re, seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
df = data_clean.unstack().reset_index()
df.columns = ['X', 'Y', 'Z']
df['X'] = pd.Categorical(df['X'])
df['X'] = df['X'].cat.codes
x = np.array(df['X'])
y = np.array(df['Y'])
z = df['Z']
print(x.shape, y.shape, z.shape)
fig = plt.figure(figsize=(6, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
cmap = ListedColormap(sns.color_palette('husl', 256).as_hex())
sc = ax.scatter(x, y, z, s=40, c=x, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
plt.rcParams['figure.figsize'] = (5, 5)
import pandas as pd
import seaborn as sns
pd.plotting.radviz(data_clean, 'label')
data = data_clean
plt.rcParams['figure.figsize'] = (5, 5)
plt.hist(data.label) | code |
73089201/cell_10 | [
"text_plain_output_1.png"
] | from pandas.plotting import autocorrelation_plot
import matplotlib.pyplot as plt
import pandas as pd
import scipy.io as sp
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
autocorrelation_plot(data_clean) | code |
73089201/cell_12 | [
"text_plain_output_1.png"
] | from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pandas.plotting import autocorrelation_plot
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import scipy.io as sp
import seaborn as sns
import seaborn as sns
import scipy.io as sp
import numpy as np
import pandas as pd
def load_data(file):
file = sp.loadmat(file)
load = file['o']
data = pd.DataFrame(load['data'][0, 0])
marker = pd.DataFrame(load['marker'][0, 0])
datadf = pd.concat([data, marker], axis=1)
datadf.columns = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'A1', 'A2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'X5', 'label']
return datadf
datadf = load_data('../input/bigdatasfinger/5F-SubjectB-151110-5St-SGLHand.mat')
data_asli = datadf.head(50000)
print(data_asli.shape,)
data_asli.info()
data_clean = data_asli
import matplotlib.pyplot as plt
import seaborn as sns
datakorelasi = data_clean.drop(['label'], axis=1).corr() * 100
plt.rcParams['figure.figsize'] = (15, 15)
data_label_0 = data_clean[data_clean.label == 0]
from pandas.plotting import lag_plot
def plot4_lag(data, label1, label2, label3, label4):
    fig, ax = plt.subplots(1, 8, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    ax[4].plot(data[label3], label=label3)
    ax[4].legend()
    pd.plotting.lag_plot(data[label3], lag=1, ax=ax[5], label=label3)
    ax[5].legend()
    ax[6].plot(data[label4], label=label4)
    ax[6].legend()
    pd.plotting.lag_plot(data[label4], lag=1, ax=ax[7], label=label4)
    ax[7].legend()
    return
def plot2_lag(data, label1, label2):
    fig, ax = plt.subplots(1, 4, figsize=(20, 5))
    ax[0].plot(data[label1], label=label1)
    ax[0].legend()
    pd.plotting.lag_plot(data[label1], lag=1, ax=ax[1], label=label1)
    ax[1].legend()
    ax[2].plot(data[label2], label=label2)
    ax[2].legend()
    pd.plotting.lag_plot(data[label2], lag=1, ax=ax[3], label=label2)
    ax[3].legend()
    return
plt_4_1 = plot4_lag(data_clean, 'Fp1', 'Fp2', 'F3', 'F4')
plt_4_2 = plot4_lag(data_clean, 'C3', 'C4', 'P3', 'P4')
plt_4_3 = plot4_lag(data_clean, 'O1', 'O2', 'A1', 'A2')
plt_4_4 = plot4_lag(data_clean, 'F7', 'F8', 'T3', 'T4')
plt_4_5 = plot4_lag(data_clean, 'T5', 'T6', 'Fz', 'Cz')
plt_2 = plot2_lag(data_clean, 'Pz', 'X5')
from pandas.plotting import autocorrelation_plot
plt.rcParams['figure.figsize'] = (15, 15)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def dscat(data, label1, label2):
    x = data[label1]
    y = data[label1]
    z = data[label2]
    ax.set_xlabel('comparator')
    ax.set_ylabel('channel')
    ax.set_zlabel('channel index')
    ax.scatter(x, y, z, s=10, c=x, marker='o', alpha=1)
    return
dscat(data_clean, 'Fp1', 'Fp2')
dscat(data_clean, 'F3', 'F4')
dscat(data_clean, 'C4', 'C3')
dscat(data_clean, 'P3', 'P4')
dscat(data_clean, 'O1', 'O2')
dscat(data_clean, 'A1', 'A2')
dscat(data_clean, 'F7', 'F8')
dscat(data_clean, 'T3', 'T4')
dscat(data_clean, 'T5', 'T6')
dscat(data_clean, 'Fz', 'Cz')
dscat(data_clean, 'Pz', 'X5')
import re, seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
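# long-format view of the frame: X = column id (categorical code), Y = row index, Z = signal value; points are coloured by column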
df = data_clean.unstack().reset_index()
df.columns = ['X', 'Y', 'Z']
df['X'] = pd.Categorical(df['X'])
df['X'] = df['X'].cat.codes
x = np.array(df['X'])
y = np.array(df['Y'])
z = df['Z']
print(x.shape, y.shape, z.shape)
fig = plt.figure(figsize=(6, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
cmap = ListedColormap(sns.color_palette('husl', 256).as_hex())
sc = ax.scatter(x, y, z, s=40, c=x, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2) | code |
104127284/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import GRU, Input, Dense, Activation, RepeatVector, Bidirectional, LSTM, Dropout, Embedding
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.text import Tokenizer
from sklearn.metrics import confusion_matrix, classification_report, f1_score
from sklearn.model_selection import train_test_split,StratifiedKFold
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib as plt
import seaborn as sns
import tensorflow as tf
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Sequential
from keras.layers import GRU, Input, Dense, Activation, RepeatVector, Bidirectional, LSTM, Dropout, Embedding
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.losses import sparse_categorical_crossentropy
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, f1_score
import collections
from tensorflow.python.client import device_lib
import matplotlib.pyplot as plt
import seaborn as sns
import re
import string
import emoji
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
SEED = 10
df = pd.read_csv('../input/nlp-getting-started/train.csv')
df_test = pd.read_csv('../input/nlp-getting-started/test.csv')
df.dropna(subset=['text'], inplace=True)
X = df['text']
y = df['target']
X_test = df_test['text']
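# stacked bidirectional-LSTM binary classifier over padded token sequences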
def get_model():
    model = tf.keras.Sequential([Input(name='inputs', shape=[MAX_LEN]), Embedding(len(tok.word_index) + 1, 128), Bidirectional(tf.keras.layers.LSTM(128, return_sequences=True)), Bidirectional(tf.keras.layers.LSTM(64)), Dense(64, activation='relu'), Dropout(0.5), Dense(1, activation='sigmoid')])  # input_dim is len(word_index) + 1 because Keras word indices start at 1 and 0 is reserved for padding
model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(0.0001), metrics=['accuracy'])
return model
skf = StratifiedKFold(n_splits=5, random_state=SEED, shuffle=True)
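# 5-fold stratified CV: the tokenizer is refit on each fold's training split, so the validation fold never leaks into the vocabulary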
CV_score_array = []
y_test_list = []
for fold, (train_idx, test_idx) in enumerate(skf.split(X, y)):
print(f'Fold: {fold + 1},', end=' ')
    X_train, X_valid = (X.iloc[train_idx], X.iloc[test_idx])  # StratifiedKFold yields positional indices, so use .iloc (the index may have gaps after dropna)
    y_train, y_valid = (y.iloc[train_idx], y.iloc[test_idx])
MAX_LEN = 50
tok = Tokenizer()
tok.fit_on_texts(X_train)
sequences = tok.texts_to_sequences(X_train)
valid_sequences = tok.texts_to_sequences(X_valid)
test_sequences = tok.texts_to_sequences(X_test)
X_train_seq = sequence.pad_sequences(sequences, maxlen=MAX_LEN)
X_valid_seq = sequence.pad_sequences(valid_sequences, maxlen=MAX_LEN)
X_test_seq = sequence.pad_sequences(test_sequences, maxlen=MAX_LEN)
model = get_model()
history = model.fit(X_train_seq, y_train, epochs=10, validation_data=(X_valid_seq, y_valid), batch_size=32, callbacks=[EarlyStopping(monitor='val_accuracy', mode='max', patience=3, verbose=False, restore_best_weights=True)])
yhat_valid = np.where(model.predict(X_valid_seq) >= 0.5, 1, 0)
f_score = f1_score(y_valid, yhat_valid)
print('F1 Score: ' + str(f_score))
print(classification_report(y_valid, yhat_valid))
y_test_list.append(model.predict(X_test_seq))
CV_score_array.append(f_score)
print('Average F1 Score 5 Folds: ' + str(np.array(CV_score_array).mean())) | code |
104127284/cell_9 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/nlp-getting-started/train.csv')
df_test = pd.read_csv('../input/nlp-getting-started/test.csv')
df.dropna(subset=['text'], inplace=True)
X = df['text']
y = df['target']
X_test = df_test['text']
df['num_words'] = df['text'].apply(lambda x: len(x.split()))
plt.figure(figsize=(20, 6))
sns.histplot(df['num_words'], bins=range(1, 50, 2), palette='Set1', alpha=0.8)
plt.title('Distribution of the word count') | code |
104127284/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104127284/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/nlp-getting-started/train.csv')
df_test = pd.read_csv('../input/nlp-getting-started/test.csv')
df.dropna(subset=['text'], inplace=True)
X = df['text']
y = df['target']
X_test = df_test['text']
plt.figure(figsize=(10, 6))
sns.countplot(x=df['target'], palette='Set1', alpha=0.8)
plt.title('Distribution of the Target Label') | code |
32068545/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
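# the 5M training rows form 10 contiguous batches of 500k samples each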
fig, axes = plt.subplots(2, 5, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
train.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
ax.set_title('Batch_' + str(i))
ax.set_ylim(-5, 14)
ax.legend()
fig.suptitle('Training Data', y=1.05)
plt.tight_layout() | code |
32068545/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
train.head() | code |
32068545/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068545/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
fig, axes = plt.subplots(2, 5, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
    train.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
    ax.set_title('Batch_' + str(i))
    ax.set_ylim(-5, 14)
    ax.legend()
fig.suptitle('Training Data', y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(1, 4, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
    test.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
    ax.set_title('Batch_' + str(i))
    ax.set_ylim(-5, 11)
    ax.legend()
fig.suptitle('Testing Data', y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(2, 5, sharex=True, sharey=True, figsize=(20, 8))
for i, ax in enumerate(axes.ravel()):
    for ch in range(11):
        sns.distplot(train.iloc[batch_indices[i]].query('open_channels == {}'.format(ch))['signal'], ax=ax, label=str(ch))
    ax.set_title('Batch_' + str(i))
    ax.legend()
fig.suptitle('Training Data', y=1.05)
ax.set_ylim(0, 2)
plt.tight_layout()
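# tag each contiguous chunk of the recordings with a signal-type id (1-5), grouping segments that show the same channel behaviour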
train_seg_boundaries = np.concatenate([[0, 500000, 600000], np.arange(1000000, 5000000 + 1, 500000)])
train_signal = np.split(np.zeros(5000000), train_seg_boundaries[1:-1])
test_seg_boundaries = np.concatenate([np.arange(0, 1000000 + 1, 100000), [1500000, 2000000]])
test_signal = np.split(np.zeros(2000000), test_seg_boundaries[1:-1])
test['signal_type'] = np.concatenate([test_signal[0] + 1, test_signal[1] + 3, test_signal[2] + 4, test_signal[3] + 1, test_signal[4] + 2, test_signal[5] + 5, test_signal[6] + 4, test_signal[7] + 5, test_signal[8] + 1, test_signal[9] + 3, test_signal[10] + 1, test_signal[11] + 1])
test['signal'].plot(kind='line', linewidth=0.2, label='Test Signal')
test['signal_type'].plot(kind='line', label='Signal Type')
plt.legend()
del test_signal | code |
32068545/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
fig, axes = plt.subplots(2, 5, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
    train.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
    ax.set_title('Batch_' + str(i))
    ax.set_ylim(-5, 14)
    ax.legend()
fig.suptitle('Training Data', y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(1, 4, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
    test.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
    ax.set_title('Batch_' + str(i))
    ax.set_ylim(-5, 11)
    ax.legend()
fig.suptitle('Testing Data', y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(2, 5, sharex=True, sharey=True, figsize=(20, 8))
for i, ax in enumerate(axes.ravel()):
    for ch in range(11):
        sns.distplot(train.iloc[batch_indices[i]].query('open_channels == {}'.format(ch))['signal'], ax=ax, label=str(ch))
    ax.set_title('Batch_' + str(i))
    ax.legend()
fig.suptitle('Training Data', y=1.05)
ax.set_ylim(0, 2)
plt.tight_layout()
train_seg_boundaries = np.concatenate([[0, 500000, 600000], np.arange(1000000, 5000000 + 1, 500000)])
train_signal = np.split(np.zeros(5000000), train_seg_boundaries[1:-1])
test_seg_boundaries = np.concatenate([np.arange(0, 1000000 + 1, 100000), [1500000, 2000000]])
test_signal = np.split(np.zeros(2000000), test_seg_boundaries[1:-1])
test['signal_type'] = np.concatenate([test_signal[0] + 1, test_signal[1] + 3, test_signal[2] + 4, test_signal[3] + 1, test_signal[4] + 2, test_signal[5] + 5, test_signal[6] + 4, test_signal[7] + 5, test_signal[8] + 1, test_signal[9] + 3, test_signal[10] + 1, test_signal[11] + 1])
del test_signal
train['signal_type'] = np.concatenate([train_signal[0] + 1, train_signal[1] + 1, train_signal[2] + 1, train_signal[3] + 2, train_signal[4] + 3, train_signal[5] + 5, train_signal[6] + 4, train_signal[7] + 2, train_signal[8] + 3, train_signal[9] + 4, train_signal[10] + 5])
train['signal'].plot(kind='line', linewidth=0.2, label='Train Signal')
train['signal_type'].plot(kind='line', label='Signal Type')
plt.legend()
del train_signal | code |
32068545/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from scipy.optimize import minimize
from sklearn.metrics import f1_score, classification_report
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
fig, axes = plt.subplots(2,5,sharex=False,sharey=True, figsize=(25,10))
for i,ax in enumerate(axes.ravel()):
train.iloc[batch_indices[i]].plot(kind='line',x='time',y=['signal'],ax=ax,linewidth=.1)
ax.set_title('Batch_'+str(i))
ax.set_ylim(-5,14)
ax.legend()
fig.suptitle('Training Data',y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(1,4,sharex=False,sharey=True, figsize=(25,10))
for i,ax in enumerate(axes.ravel()):
test.iloc[batch_indices[i]].plot(kind='line',x='time',y=['signal'],ax=ax,linewidth=.1)
ax.set_title('Batch_'+str(i))
ax.set_ylim(-5,11)
ax.legend()
fig.suptitle('Testing Data',y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(2,5,sharex=True,sharey=True, figsize=(20,8))
for i,ax in enumerate(axes.ravel()):
    for ch in range(11):
        sns.distplot(train.iloc[batch_indices[i]].query(f'open_channels == {ch}')['signal'],ax=ax,label=str(ch))
    ax.set_title('Batch_'+str(i))
    ax.legend()
fig.suptitle('Training Data',y=1.05)
ax.set_ylim(0,2)
plt.tight_layout()
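# Mean signal level for every (signal_type, open_channels) pair; these seed the
# per-type affine rescaling fits below.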
means = train.groupby(['signal_type', 'open_channels']).mean().signal
train['scaled_signal'] = train['signal']
test['scaled_signal'] = test['signal']
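# Objective for the per-type fit: shift the signal by x[0], scale it by x[1], round
# and clip to [0, 10], and return the negative weighted F1 against open_channels
# (negated so that minimize() maximizes F1).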
def shift_model(x, sig_type):
    scaled = (train.loc[train.signal_type == sig_type, 'signal'] - x[0]) * x[1]
    target = train.loc[train.signal_type == sig_type, 'open_channels']
    return -f1_score(target, scaled.clip(0, 10).round(), average='weighted')
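# Types 1-4: seed the offset at the 0-channel mean and the scale at the inverse of
# the gap between the 0- and 1-channel means, then refine with Powell's method.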
for i in range(1, 5):
    print(i)
    min_f = minimize(shift_model, [means.loc[i, 0], 1 / (means.loc[i, 1] - means.loc[i, 0])], args=i, method='Powell')
    train.loc[train.signal_type == i, 'scaled_signal'] = (train.loc[train.signal_type == i, 'signal'] - min_f['x'][0]) * min_f['x'][1]
    test.loc[test.signal_type == i, 'scaled_signal'] = (test.loc[test.signal_type == i, 'signal'] - min_f['x'][0]) * min_f['x'][1]
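# Type 5 is seeded from its 1- and 6-channel means instead (a usable 0-channel
# level may not exist for this segment type).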
i = 5
min_f = minimize(shift_model, [means.loc[i, 1] - 1, 5 / (means.loc[i, 6] - means.loc[i, 1])], args=i, method='Powell')
train.loc[train.signal_type == i, 'scaled_signal'] = (train.loc[train.signal_type == i, 'signal'] - min_f['x'][0]) * min_f['x'][1]
test.loc[test.signal_type == i, 'scaled_signal'] = (test.loc[test.signal_type == i, 'signal'] - min_f['x'][0]) * min_f['x'][1]
del means | code |
32068545/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
fig, axes = plt.subplots(2,5,sharex=False,sharey=True, figsize=(25,10))
for i,ax in enumerate(axes.ravel()):
    train.iloc[batch_indices[i]].plot(kind='line',x='time',y=['signal'],ax=ax,linewidth=.1)
    ax.set_title('Batch_'+str(i))
    ax.set_ylim(-5,14)
    ax.legend()
fig.suptitle('Training Data',y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(1, 4, sharex=False, sharey=True, figsize=(25, 10))
for i, ax in enumerate(axes.ravel()):
    test.iloc[batch_indices[i]].plot(kind='line', x='time', y=['signal'], ax=ax, linewidth=0.1)
    ax.set_title('Batch_' + str(i))
    ax.set_ylim(-5, 11)
    ax.legend()
fig.suptitle('Testing Data', y=1.05)
plt.tight_layout() | code |
32068545/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
batch_indices = [slice(500000 * i, 500000 * (i + 1)) for i in range(10)]
fig, axes = plt.subplots(2,5,sharex=False,sharey=True, figsize=(25,10))
for i,ax in enumerate(axes.ravel()):
    train.iloc[batch_indices[i]].plot(kind='line',x='time',y=['signal'],ax=ax,linewidth=.1)
    ax.set_title('Batch_'+str(i))
    ax.set_ylim(-5,14)
    ax.legend()
fig.suptitle('Training Data',y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(1,4,sharex=False,sharey=True, figsize=(25,10))
for i,ax in enumerate(axes.ravel()):
    test.iloc[batch_indices[i]].plot(kind='line',x='time',y=['signal'],ax=ax,linewidth=.1)
    ax.set_title('Batch_'+str(i))
    ax.set_ylim(-5,11)
    ax.legend()
fig.suptitle('Testing Data',y=1.05)
plt.tight_layout()
fig, axes = plt.subplots(2, 5, sharex=True, sharey=True, figsize=(20, 8))
for i, ax in enumerate(axes.ravel()):
    for ch in range(11):
        sns.distplot(train.iloc[batch_indices[i]].query(f'open_channels == {ch}')['signal'], ax=ax, label=str(ch))
    ax.set_title('Batch_' + str(i))
    ax.legend()
fig.suptitle('Training Data', y=1.05)
ax.set_ylim(0, 2)
plt.tight_layout() | code |
32068545/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/data-without-drift/train_clean.csv')
test = pd.read_csv('/kaggle/input/data-without-drift/test_clean.csv')
train.info() | code |
17098455/cell_21 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
spam_filter = Pipeline([('vectorizer', TfidfVectorizer(analyzer=process)), ('classifier', MultinomialNB())])
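# NOTE: x_train, x_test, y_train, y_test come from a train/test split cell that is
# not captured in this extract (e.g. train_test_split over df['Message'] and df['Label']).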
spam_filter.fit(x_train, y_train)
predictions = spam_filter.predict(x_test)
count = 0
for i in range(len(y_test)):
    if y_test.iloc[i] != predictions[i]:
        count += 1
x_test[y_test != predictions] | code |
17098455/cell_13 | [
"text_html_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import string
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe()
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
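# Vectorize every message with TF-IDF, using the cleaning pipeline above as the analyzer.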
tfidfv = TfidfVectorizer(analyzer=process)
data = tfidfv.fit_transform(df['Message'])
mess = df.iloc[2]['Message']
print(tfidfv.transform([mess])) | code |
17098455/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe()
sns.countplot(data=df, x='Label') | code |
17098455/cell_23 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
spam_filter = Pipeline([('vectorizer', TfidfVectorizer(analyzer=process)), ('classifier', MultinomialNB())])
spam_filter.fit(x_train, y_train)
predictions = spam_filter.predict(x_test)
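# Classify a single raw message with the fitted pipeline.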
def detect_spam(s):
    return spam_filter.predict([s])[0]
detect_spam('Your cash-balance is currently 500 pounds - to maximize your cash-in now, send COLLECT to 83600.') | code |
17098455/cell_20 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
spam_filter = Pipeline([('vectorizer', TfidfVectorizer(analyzer=process)), ('classifier', MultinomialNB())])
spam_filter.fit(x_train, y_train)
predictions = spam_filter.predict(x_test)
count = 0
for i in range(len(y_test)):
    if y_test.iloc[i] != predictions[i]:
        count += 1
print('Total number of test cases', len(y_test))
print('Number of wrong of predictions', count) | code |
17098455/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.head() | code |
17098455/cell_1 | [
"text_plain_output_1.png"
] | import os
print(os.listdir('../input'))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer | code |
17098455/cell_7 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
process("It's has been a long day running.") | code |
17098455/cell_18 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
spam_filter = Pipeline([('vectorizer', TfidfVectorizer(analyzer=process)), ('classifier', MultinomialNB())])
spam_filter.fit(x_train, y_train) | code |
17098455/cell_8 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
import pandas as pd
import string
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe()
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
df['Message'][:20].apply(process) | code |
17098455/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe() | code |
17098455/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import string
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe()
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
tfidfv = TfidfVectorizer(analyzer=process)
data = tfidfv.fit_transform(df['Message'])
mess = df.iloc[2]['Message']
j = tfidfv.transform([mess]).toarray()[0]
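# Print the idf and tf-idf weight of every term that occurs in this message.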
print('index\tidf\ttfidf\tterm')
for i in range(len(j)):
    if j[i] != 0:
        print(i, format(tfidfv.idf_[i], '.4f'), format(j[i], '.4f'), tfidfv.get_feature_names()[i], sep='\t') | code |
17098455/cell_22 | [
"text_plain_output_1.png"
from nltk.corpus import stopwords
from nltk import PorterStemmer as Stemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import string
def process(text):
    text = text.lower()
    text = ''.join([t for t in text if t not in string.punctuation])
    text = [t for t in text.split() if t not in stopwords.words('english')]
    st = Stemmer()
    text = [st.stem(t) for t in text]
    return text
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
spam_filter = Pipeline([('vectorizer', TfidfVectorizer(analyzer=process)), ('classifier', MultinomialNB())])
spam_filter.fit(x_train, y_train)
predictions = spam_filter.predict(x_test)
count = 0
for i in range(len(y_test)):
    if y_test.iloc[i] != predictions[i]:
        count += 1
from sklearn.metrics import classification_report
print(classification_report(predictions, y_test)) | code |
17098455/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.DataFrame(pd.read_csv('../input/spam.csv', encoding='latin-1')[['v1', 'v2']])
df.columns = ['Label', 'Message']
df.groupby('Label').describe()
mess = df.iloc[2]['Message']
print(mess) | code |
74067865/cell_13 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd
import plotly.graph_objects as go
import plotly.graph_objects as go
healthsysdf = pd.read_csv('../input/world-bank-wdi-212-health-systems/2.12_Health_systems.csv')
healthsysdf = healthsysdf.drop(columns='Province_State')
healthsysdf = healthsysdf.drop(columns='Country_Region')
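# Derive each spending component as a share of GDP: government, out-of-pocket, and the remainder.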
healthsysdf['Total_Gov_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_public_pct_2016, axis=1)
healthsysdf['Outofpocket_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_out_of_pocket_pct_2016, axis=1)
healthsysdf['Other_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 - row.Total_Gov_Spend - row.Outofpocket_Spend, axis=1)
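# ISO 3166-1 alpha-3 codes, one per dataframe row; empty strings mark entries left unmatched.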
countrycodes = ['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'ATG', 'ARG', 'ARM', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR', 'BEL', 'BLZ', 'BEN', 'BTN', 'BOL', 'BIH', 'BWA', 'BRA', 'BRN', 'BGR', 'BFA', 'BDI', 'CPV', 'KHM', 'CMR', 'CAN', '', 'CAF', 'TCD', '', 'CHL', 'CHN', '', '', 'COL', 'COM', 'COD', 'COG', 'CRI', 'CIV', 'HRV', 'CUB', 'CYP', 'CZE', 'DNK', 'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST', 'SWZ', 'ETH', '', 'FJI', 'FIN', 'FRA', '', 'GAB', 'GMB', 'GEO', 'DEU', 'GHA', 'GRC', '', 'GRD', '', 'GTM', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HUN', 'ISL', 'IND', 'IDN', 'IRN', 'IRQ', 'IRL', '', 'ISR', 'ITA', 'JAM', 'JPN', 'JOR', 'KAZ', 'KEN', 'KIR', '', 'KOR', '', 'KWT', 'KGZ', 'LAO', 'LVA', 'LBN', 'LSO', 'LBR', '', '', 'LTU', 'LUX', 'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MHL', 'MRT', 'MUS', 'MEX', 'FSM', 'MDA', 'MCO', 'MNG', 'MNE', 'MAR', 'MOZ', 'MMR', 'NAM', 'NPL', 'NLD', '', 'NZL', 'NGA', 'NER', 'NGA', 'MKD', '', 'NOR', 'OMN', 'PAK', 'PLW', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL', 'PRT', '', 'QAT', 'ROU', 'RUS', 'RWA', 'WSM', 'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', '', 'SVK', 'SVN', 'SLB', '', 'ZAF', '', 'ESP', 'LKA', 'KNA', 'LCA', '', 'VCT', 'SDN', 'SUR', 'SWE', 'CHE', '', 'TJK', 'TZA', 'THA', 'TLS', 'TGO', 'TON', 'TTO', 'TUN', 'TUR', 'TKM', '', 'TUV', 'UGA', 'UKR', 'ARE', 'GBR', 'USA', 'URY', 'UZB', 'VUT', 'VEN', 'VNM', '', '', 'YEM', 'ZMB', 'ZWE']
healthsysdf['Country_Codes'] = countrycodes
bginfo = pd.read_csv('../input/undata-country-profiles/country_profile_variables.csv')
bginfo.rename(columns={'country': 'World_Bank_Name'}, inplace=True)
bginfo = bginfo.replace({'United States of America': 'United States', 'Viet Nam': 'Vietnam'})
healthsysdf = healthsysdf.replace({'Yemen, Rep.': 'Yemen'})
healthsysdf = pd.merge(healthsysdf, bginfo, on='World_Bank_Name', how='outer')
healthsysdf = healthsysdf.dropna(thresh=3)
badgdp = healthsysdf[healthsysdf['GDP: Gross domestic product (million current US$)'] < 0].index
healthsysdf.drop(badgdp, inplace=True)
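# Collapse UN subregions into continent-level regions.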
healthsysdf.replace({'SouthernAsia': 'Asia', 'WesternAsia': 'Asia', 'EasternAsia': 'Asia', 'CentralAsia': 'Asia', 'South-easternAsia': 'Asia', 'WesternEurope': 'Europe', 'SouthernEurope': 'Europe', 'EasternEurope': 'Europe', 'NorthernEurope': 'Europe', 'NorthernAfrica': 'Africa', 'MiddleAfrica': 'Africa', 'WesternAfrica': 'Africa', 'EasternAfrica': 'Africa', 'SouthernAfrica': 'Africa', 'SouthAmerica': 'Americas', 'Caribbean': 'Americas', 'CentralAmerica': 'Americas', 'NorthernAmerica': 'Americas', 'Polynesia': 'Oceania', 'Melanesia': 'Oceania', 'Micronesia': 'Oceania'}, inplace=True)
total_exp = healthsysdf.sort_values('Health_exp_pct_GDP_2016', ascending = False)
top_ten_exp = total_exp.head(10)
total_exp = total_exp.sort_values('Health_exp_pct_GDP_2016')
low_ten_exp = total_exp.head(10)
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(
    go.Bar(x=top_ten_exp['World_Bank_Name'], y=top_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=1
)
fig.add_trace(
    go.Bar(x=low_ten_exp['World_Bank_Name'], y=low_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=2
)
fig.update_layout(
    title={
        'text': "Ten highest and lowest spenders",
        'y': 0.9,
        'x': 0.5,
        'xanchor': 'center',
        'yanchor': 'top'},
    plot_bgcolor='white',
    paper_bgcolor='white',
    yaxis_title="% of GDP spent on healthcare",
    showlegend=False,
    font=dict(
        family="Courier New, monospace",
        size=14,
        color="#7f7f7f"
    )
)
fig.show()
import plotly.graph_objects as go
import pandas as pd
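# Three choropleths: total health spending (% of GDP), the government-funded share,
# and per-capita spending (PPP); only the last figure is shown in this cell.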
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Health_exp_pct_GDP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Percentage of GDP spent on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Total_Gov_Spend'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Government Spending on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['per_capita_exp_PPP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Healthcare Spending per Capita', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig.show() | code |
74067865/cell_9 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd
import plotly.graph_objects as go
import plotly.graph_objects as go
healthsysdf = pd.read_csv('../input/world-bank-wdi-212-health-systems/2.12_Health_systems.csv')
healthsysdf = healthsysdf.drop(columns='Province_State')
healthsysdf = healthsysdf.drop(columns='Country_Region')
healthsysdf['Total_Gov_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_public_pct_2016, axis=1)
healthsysdf['Outofpocket_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_out_of_pocket_pct_2016, axis=1)
healthsysdf['Other_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 - row.Total_Gov_Spend - row.Outofpocket_Spend, axis=1)
countrycodes = ['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'ATG', 'ARG', 'ARM', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR', 'BEL', 'BLZ', 'BEN', 'BTN', 'BOL', 'BIH', 'BWA', 'BRA', 'BRN', 'BGR', 'BFA', 'BDI', 'CPV', 'KHM', 'CMR', 'CAN', '', 'CAF', 'TCD', '', 'CHL', 'CHN', '', '', 'COL', 'COM', 'COD', 'COG', 'CRI', 'CIV', 'HRV', 'CUB', 'CYP', 'CZE', 'DNK', 'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST', 'SWZ', 'ETH', '', 'FJI', 'FIN', 'FRA', '', 'GAB', 'GMB', 'GEO', 'DEU', 'GHA', 'GRC', '', 'GRD', '', 'GTM', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HUN', 'ISL', 'IND', 'IDN', 'IRN', 'IRQ', 'IRL', '', 'ISR', 'ITA', 'JAM', 'JPN', 'JOR', 'KAZ', 'KEN', 'KIR', '', 'KOR', '', 'KWT', 'KGZ', 'LAO', 'LVA', 'LBN', 'LSO', 'LBR', '', '', 'LTU', 'LUX', 'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MHL', 'MRT', 'MUS', 'MEX', 'FSM', 'MDA', 'MCO', 'MNG', 'MNE', 'MAR', 'MOZ', 'MMR', 'NAM', 'NPL', 'NLD', '', 'NZL', 'NGA', 'NER', 'NGA', 'MKD', '', 'NOR', 'OMN', 'PAK', 'PLW', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL', 'PRT', '', 'QAT', 'ROU', 'RUS', 'RWA', 'WSM', 'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', '', 'SVK', 'SVN', 'SLB', '', 'ZAF', '', 'ESP', 'LKA', 'KNA', 'LCA', '', 'VCT', 'SDN', 'SUR', 'SWE', 'CHE', '', 'TJK', 'TZA', 'THA', 'TLS', 'TGO', 'TON', 'TTO', 'TUN', 'TUR', 'TKM', '', 'TUV', 'UGA', 'UKR', 'ARE', 'GBR', 'USA', 'URY', 'UZB', 'VUT', 'VEN', 'VNM', '', '', 'YEM', 'ZMB', 'ZWE']
healthsysdf['Country_Codes'] = countrycodes
bginfo = pd.read_csv('../input/undata-country-profiles/country_profile_variables.csv')
bginfo.rename(columns={'country': 'World_Bank_Name'}, inplace=True)
bginfo = bginfo.replace({'United States of America': 'United States', 'Viet Nam': 'Vietnam'})
healthsysdf = healthsysdf.replace({'Yemen, Rep.': 'Yemen'})
healthsysdf = pd.merge(healthsysdf, bginfo, on='World_Bank_Name', how='outer')
healthsysdf = healthsysdf.dropna(thresh=3)
badgdp = healthsysdf[healthsysdf['GDP: Gross domestic product (million current US$)'] < 0].index
healthsysdf.drop(badgdp, inplace=True)
healthsysdf.replace({'SouthernAsia': 'Asia', 'WesternAsia': 'Asia', 'EasternAsia': 'Asia', 'CentralAsia': 'Asia', 'South-easternAsia': 'Asia', 'WesternEurope': 'Europe', 'SouthernEurope': 'Europe', 'EasternEurope': 'Europe', 'NorthernEurope': 'Europe', 'NorthernAfrica': 'Africa', 'MiddleAfrica': 'Africa', 'WesternAfrica': 'Africa', 'EasternAfrica': 'Africa', 'SouthernAfrica': 'Africa', 'SouthAmerica': 'Americas', 'Caribbean': 'Americas', 'CentralAmerica': 'Americas', 'NorthernAmerica': 'Americas', 'Polynesia': 'Oceania', 'Melanesia': 'Oceania', 'Micronesia': 'Oceania'}, inplace=True)
total_exp = healthsysdf.sort_values('Health_exp_pct_GDP_2016', ascending = False)
top_ten_exp = total_exp.head(10)
total_exp = total_exp.sort_values('Health_exp_pct_GDP_2016')
low_ten_exp = total_exp.head(10)
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(
    go.Bar(x=top_ten_exp['World_Bank_Name'], y=top_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=1
)
fig.add_trace(
    go.Bar(x=low_ten_exp['World_Bank_Name'], y=low_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=2
)
fig.update_layout(
    title={
        'text': "Ten highest and lowest spenders",
        'y': 0.9,
        'x': 0.5,
        'xanchor': 'center',
        'yanchor': 'top'},
    plot_bgcolor='white',
    paper_bgcolor='white',
    yaxis_title="% of GDP spent on healthcare",
    showlegend=False,
    font=dict(
        family="Courier New, monospace",
        size=14,
        color="#7f7f7f"
    )
)
fig.show()
import plotly.graph_objects as go
import pandas as pd
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Health_exp_pct_GDP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Percentage of GDP spent on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig.show() | code |
74067865/cell_11 | [
"text_html_output_2.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd
import plotly.graph_objects as go
import plotly.graph_objects as go
healthsysdf = pd.read_csv('../input/world-bank-wdi-212-health-systems/2.12_Health_systems.csv')
healthsysdf = healthsysdf.drop(columns='Province_State')
healthsysdf = healthsysdf.drop(columns='Country_Region')
healthsysdf['Total_Gov_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_public_pct_2016, axis=1)
healthsysdf['Outofpocket_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_out_of_pocket_pct_2016, axis=1)
healthsysdf['Other_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 - row.Total_Gov_Spend - row.Outofpocket_Spend, axis=1)
countrycodes = ['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'ATG', 'ARG', 'ARM', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR', 'BEL', 'BLZ', 'BEN', 'BTN', 'BOL', 'BIH', 'BWA', 'BRA', 'BRN', 'BGR', 'BFA', 'BDI', 'CPV', 'KHM', 'CMR', 'CAN', '', 'CAF', 'TCD', '', 'CHL', 'CHN', '', '', 'COL', 'COM', 'COD', 'COG', 'CRI', 'CIV', 'HRV', 'CUB', 'CYP', 'CZE', 'DNK', 'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST', 'SWZ', 'ETH', '', 'FJI', 'FIN', 'FRA', '', 'GAB', 'GMB', 'GEO', 'DEU', 'GHA', 'GRC', '', 'GRD', '', 'GTM', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HUN', 'ISL', 'IND', 'IDN', 'IRN', 'IRQ', 'IRL', '', 'ISR', 'ITA', 'JAM', 'JPN', 'JOR', 'KAZ', 'KEN', 'KIR', '', 'KOR', '', 'KWT', 'KGZ', 'LAO', 'LVA', 'LBN', 'LSO', 'LBR', '', '', 'LTU', 'LUX', 'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MHL', 'MRT', 'MUS', 'MEX', 'FSM', 'MDA', 'MCO', 'MNG', 'MNE', 'MAR', 'MOZ', 'MMR', 'NAM', 'NPL', 'NLD', '', 'NZL', 'NGA', 'NER', 'NGA', 'MKD', '', 'NOR', 'OMN', 'PAK', 'PLW', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL', 'PRT', '', 'QAT', 'ROU', 'RUS', 'RWA', 'WSM', 'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', '', 'SVK', 'SVN', 'SLB', '', 'ZAF', '', 'ESP', 'LKA', 'KNA', 'LCA', '', 'VCT', 'SDN', 'SUR', 'SWE', 'CHE', '', 'TJK', 'TZA', 'THA', 'TLS', 'TGO', 'TON', 'TTO', 'TUN', 'TUR', 'TKM', '', 'TUV', 'UGA', 'UKR', 'ARE', 'GBR', 'USA', 'URY', 'UZB', 'VUT', 'VEN', 'VNM', '', '', 'YEM', 'ZMB', 'ZWE']
healthsysdf['Country_Codes'] = countrycodes
bginfo = pd.read_csv('../input/undata-country-profiles/country_profile_variables.csv')
bginfo.rename(columns={'country': 'World_Bank_Name'}, inplace=True)
bginfo = bginfo.replace({'United States of America': 'United States', 'Viet Nam': 'Vietnam'})
healthsysdf = healthsysdf.replace({'Yemen, Rep.': 'Yemen'})
healthsysdf = pd.merge(healthsysdf, bginfo, on='World_Bank_Name', how='outer')
healthsysdf = healthsysdf.dropna(thresh=3)
badgdp = healthsysdf[healthsysdf['GDP: Gross domestic product (million current US$)'] < 0].index
healthsysdf.drop(badgdp, inplace=True)
healthsysdf.replace({'SouthernAsia': 'Asia', 'WesternAsia': 'Asia', 'EasternAsia': 'Asia', 'CentralAsia': 'Asia', 'South-easternAsia': 'Asia', 'WesternEurope': 'Europe', 'SouthernEurope': 'Europe', 'EasternEurope': 'Europe', 'NorthernEurope': 'Europe', 'NorthernAfrica': 'Africa', 'MiddleAfrica': 'Africa', 'WesternAfrica': 'Africa', 'EasternAfrica': 'Africa', 'SouthernAfrica': 'Africa', 'SouthAmerica': 'Americas', 'Caribbean': 'Americas', 'CentralAmerica': 'Americas', 'NorthernAmerica': 'Americas', 'Polynesia': 'Oceania', 'Melanesia': 'Oceania', 'Micronesia': 'Oceania'}, inplace=True)
total_exp = healthsysdf.sort_values('Health_exp_pct_GDP_2016', ascending = False)
top_ten_exp = total_exp.head(10)
total_exp = total_exp.sort_values('Health_exp_pct_GDP_2016')
low_ten_exp = total_exp.head(10)
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(
    go.Bar(x=top_ten_exp['World_Bank_Name'], y=top_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=1
)
fig.add_trace(
    go.Bar(x=low_ten_exp['World_Bank_Name'], y=low_ten_exp['Health_exp_pct_GDP_2016']),
    row=1, col=2
)
fig.update_layout(
    title={
        'text': "Ten highest and lowest spenders",
        'y': 0.9,
        'x': 0.5,
        'xanchor': 'center',
        'yanchor': 'top'},
    plot_bgcolor='white',
    paper_bgcolor='white',
    yaxis_title="% of GDP spent on healthcare",
    showlegend=False,
    font=dict(
        family="Courier New, monospace",
        size=14,
        color="#7f7f7f"
    )
)
fig.show()
import plotly.graph_objects as go
import pandas as pd
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Health_exp_pct_GDP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Percentage of GDP spent on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Total_Gov_Spend'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Government Spending on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig.show() | code |
74067865/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import math
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import matplotlib as mpl
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.express as px | code |