path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
49124084/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
data = pd.read_csv('../input/twitter-train/train.txt', delimiter='\n', header=None)
data_array = data.to_numpy()
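# each record in train.txt spans three consecutive lines (tweet text, target entity, sentiment),
# so the flat array is reshaped into rows of three columns below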
x_array = np.reshape(data_array, (-1, 3))
column = ['Tweet', 'Target', 'Sentiment']
data = pd.DataFrame(data=x_array, columns=column)
data
my_dataset = data
my_dataset = my_dataset.drop(['Target', 'Sentiment'], axis=1)
my_target = data.drop(['Tweet', 'Sentiment'], axis=1)
for i in my_dataset.index:
    x = my_dataset['Tweet'][i].find('$T$')
s = my_dataset['Tweet'][1].replace('$T$', my_target['Target'][0])
j = 0
my_targetless_tweet = []
for i in range(6248):
    my_targetless_tweet.insert(i, my_dataset['Tweet'][i].replace('$T$', my_target['Target'][j]))
    j = j + 1
my_targetless_tweet = pd.DataFrame(my_targetless_tweet)
my_targetless_tweet
targetless_tweet = my_targetless_tweet.to_numpy()
new_array = np.reshape(targetless_tweet, (-1, 1))
column = ['Tweet (no target)']
no_target_data = pd.DataFrame(data=new_array, columns=column)
no_target_data
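# read_glove_vecs (below) builds word->index and index->word maps plus a word->embedding-vector
# dictionary from a GloVe text file, one token and its vector per line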
def read_glove_vecs(glove_file):
    with open(glove_file, 'r', encoding='utf8') as f:
        words = set()
        word_to_vec_map = {}
        for line in f:
            line = line.strip().split()
            curr_word = line[0]
            words.add(curr_word)
            word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)
    i = 1
    words_to_index = {}
    index_to_words = {}
    for w in sorted(words):
        words_to_index[w] = i
        index_to_words[i] = w
        i = i + 1
    return (words_to_index, index_to_words, word_to_vec_map)
# the GloVe file is never loaded in this cell; the call below restores that step
# (the embedding path is an assumption -- point it at the actual GloVe dataset)
words_to_index, index_to_words, word_to_vec_map = read_glove_vecs('../input/glove6b50dtxt/glove.6B.50d.txt')
word_to_vec_map['sid'] | code |
49124084/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
49124084/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
data = pd.read_csv('../input/twitter-train/train.txt', delimiter='\n', header=None)
data_array = data.to_numpy()
x_array = np.reshape(data_array, (-1, 3))
column = ['Tweet', 'Target', 'Sentiment']
data = pd.DataFrame(data=x_array, columns=column)
data
my_dataset = data
my_dataset = my_dataset.drop(['Target', 'Sentiment'], axis=1)
my_target = data.drop(['Tweet', 'Sentiment'], axis=1)
for i in my_dataset.index:
    x = my_dataset['Tweet'][i].find('$T$')
s = my_dataset['Tweet'][1].replace('$T$', my_target['Target'][0])
j = 0
my_targetless_tweet = []
for i in range(6248):
    my_targetless_tweet.insert(i, my_dataset['Tweet'][i].replace('$T$', my_target['Target'][j]))
    j = j + 1
my_targetless_tweet = pd.DataFrame(my_targetless_tweet)
my_targetless_tweet
targetless_tweet = my_targetless_tweet.to_numpy()
new_array = np.reshape(targetless_tweet, (-1, 1))
column = ['Tweet (no target)']
no_target_data = pd.DataFrame(data=new_array, columns=column)
no_target_data
import string, re
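# remove_punct strips punctuation and digits, collapses whitespace, and lower-cases the tweet text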
def remove_punct(x):
    comp = re.compile('[%s\\d]' % re.escape(string.punctuation))
    return ' '.join(comp.sub(' ', str(x)).split()).lower()
no_target_data['Tweet (no target)'] = no_target_data['Tweet (no target)'].apply(remove_punct)
no_target_data | code |
49124084/cell_3 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
data = pd.read_csv('../input/twitter-train/train.txt', delimiter='\n', header=None)
print(data) | code |
49124084/cell_10 | [
"text_plain_output_1.png"
] | # assumes a word_to_index mapping (e.g. the one returned by read_glove_vecs) was built in an earlier cell
word_to_index['sid'] | code |
49124084/cell_12 | [
"text_plain_output_1.png"
] | word_to_index['unk'] | code |
49124084/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
data = pd.read_csv('../input/twitter-train/train.txt', delimiter='\n', header=None)
data_array = data.to_numpy()
x_array = np.reshape(data_array, (-1, 3))
column = ['Tweet', 'Target', 'Sentiment']
data = pd.DataFrame(data=x_array, columns=column)
data
my_dataset = data
my_dataset = my_dataset.drop(['Target', 'Sentiment'], axis=1)
print(my_dataset)
my_target = data.drop(['Tweet', 'Sentiment'], axis=1)
print(my_target)
print(my_target['Target'][0])
for i in my_dataset.index:
    x = my_dataset['Tweet'][i].find('$T$')
s = my_dataset['Tweet'][1].replace('$T$', my_target['Target'][0])
j = 0
my_targetless_tweet = []
for i in range(6248):
    my_targetless_tweet.insert(i, my_dataset['Tweet'][i].replace('$T$', my_target['Target'][j]))
    j = j + 1
print(my_targetless_tweet) | code |
50212280/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import category_encoders as ce
import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
aug_data.isnull().sum()
y = aug_data.target.astype('int')
X = aug_data.drop('target', axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
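# CatBoostEncoder target-encodes every column listed in cols using the training labels,
# so it is fit on the training split only (as done below) to avoid leakage into validation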
catboost_encoder = ce.CatBoostEncoder(cols=X.columns)
catboost_encoder.fit(X_train, y_train)
def catboost_encode_x_data(x_data):
    encoder_x_data = x_data.copy()
    encoder_x_data = catboost_encoder.transform(x_data)
    encoder_x_data.index = x_data.index
    return encoder_x_data
encoder_X_train = catboost_encode_x_data(X_train)
encoder_X_val = catboost_encode_x_data(X_val) | code |
50212280/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
aug_data.isnull().sum() | code |
50212280/cell_11 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
aug_data.isnull().sum()
y = aug_data.target.astype('int')
X = aug_data.drop('target', axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)
print('X_train: ', X_train.shape, 'y_train: ', y_train.shape, '\nX_val: ', X_val.shape, 'y_val: ', y_val.shape) | code |
50212280/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | encoder_X_test = catboost_encode_x_data(X_test)
y_test_predict = model.predict(encoder_X_test)
submit_data = pd.DataFrame({'label': y_test_predict}, index=X_test.index)
submit_data.to_csv('submission.csv')
!head submission.csv | code |
50212280/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
print('gender:', aug_data.gender.unique(), '\n')
print('enrolled_university:', aug_data.enrolled_university.unique(), '\n')
print('education_level:', aug_data.education_level.unique(), '\n')
print('major_discipline:', aug_data.major_discipline.unique(), '\n')
print('experience:', aug_data.experience.unique(), '\n')
print('company_size:', aug_data.company_size.unique(), '\n')
print('company_type:', aug_data.company_type.unique(), '\n')
print('last_new_job:', aug_data.last_new_job.unique(), '\n') | code |
50212280/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
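# fill_null_data (below) replaces missing categorical values in place with an explicit
# 'Other' / 'Unknown' level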
def fill_null_data(df):
    df.gender = df.gender.fillna('Other')
    df.enrolled_university = df.enrolled_university.fillna('Unknown')
    df.education_level = df.education_level.fillna('Unknown')
    df.major_discipline = df.major_discipline.fillna('Unknown')
    df.experience = df.experience.fillna('Unknown')
    df.company_size = df.company_size.fillna('Unknown')
    df.company_type = df.company_type.fillna('Unknown')
    df.last_new_job = df.last_new_job.fillna('Unknown')
fill_null_data(aug_data)
X_test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv', index_col='enrollee_id')
fill_null_data(X_test)
X_test | code |
50212280/cell_15 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
import category_encoders as ce
import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
aug_data.isnull().sum()
y = aug_data.target.astype('int')
X = aug_data.drop('target', axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
catboost_encoder = ce.CatBoostEncoder(cols=X.columns)
catboost_encoder.fit(X_train, y_train)
def catboost_encode_x_data(x_data):
    encoder_x_data = x_data.copy()
    encoder_x_data = catboost_encoder.transform(x_data)
    encoder_x_data.index = x_data.index
    return encoder_x_data
encoder_X_train = catboost_encode_x_data(X_train)
encoder_X_val = catboost_encode_x_data(X_val)
from sklearn.metrics import mean_squared_error
from sklearn import metrics
from sklearn import svm
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import SGDRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
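# calc_score (below) reports the 5-fold cross-validated mean squared error;
# the sign flip undoes sklearn's negated 'neg_mean_squared_error' scorer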
def calc_score(model):
    scores = -1 * cross_val_score(model, encoder_X_train, y_train, cv=5, scoring='neg_mean_squared_error')
    print('MSE score:', scores.mean())
print('SVR ->')
calc_score(svm.SVR())
print('XGBRegressor ->')
calc_score(XGBRegressor())
print('SGDRegressor ->')
calc_score(SGDRegressor())
print('GradientBoostingRegressor ->')
calc_score(GradientBoostingRegressor())
print('KNeighborsRegressor ->')
calc_score(KNeighborsRegressor())
print('MLPRegressor ->')
calc_score(MLPRegressor()) | code |
50212280/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import category_encoders as ce
import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum()
aug_data.isnull().sum()
y = aug_data.target.astype('int')
X = aug_data.drop('target', axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
catboost_encoder = ce.CatBoostEncoder(cols=X.columns)
catboost_encoder.fit(X_train, y_train)
def catboost_encode_x_data(x_data):
    encoder_x_data = x_data.copy()
    encoder_x_data = catboost_encoder.transform(x_data)
    encoder_x_data.index = x_data.index
    return encoder_x_data
encoder_X_train = catboost_encode_x_data(X_train)
encoder_X_val = catboost_encode_x_data(X_val)
model = GradientBoostingRegressor()
model.fit(encoder_X_train, y_train)
y_val_predict = model.predict(encoder_X_val)
error = mean_squared_error(y_val, y_val_predict)
print('MSE: ', error) | code |
50212280/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data | code |
50212280/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
aug_data = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv', index_col='enrollee_id')
aug_data = aug_data.sort_index()
aug_data
aug_data.isnull().sum() | code |
88104935/cell_4 | [
"text_plain_output_1.png"
] | import random
face = ['BlueF', 'BlackF', 'OrangeF', 'WhiteF']
face_weights = [2, 47, 3, 48]
eyes = ['BlueE', 'BrownE', 'GreenE', 'PurpleE', 'RedE', 'YellowE']
eye_weights = [20, 50, 20, 6, 3, 1]
hair = ['BlackdevH', 'DanH', 'DevH', 'PeteH', 'SophH']
hair_weights = [22, 25, 25, 3, 25]
mouth = ['frownM', 'indiffM', 'smileM', 'redroboM', 'blueroboM', 'zipM']
mouth_weights = [15, 25, 50, 2, 4, 4]
nose = ['DnoseN', 'PointN', 'TetnoseN']
nose_weights = [40, 55, 5]
glasses = ['leoG', 'blank']
glasses_weights = [15, 85]
total_images = 100
all_images = []
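# each trait is drawn with random.choices using the per-trait weights above;
# duplicate trait combinations are rejected by recursing until an unseen one appears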
def create_new_image():
    new_image = {}
    new_image['Face'] = random.choices(face, face_weights)[0]
    new_image['Eyes'] = random.choices(eyes, eye_weights)[0]
    new_image['Hair'] = random.choices(hair, hair_weights)[0]
    new_image['Mouth'] = random.choices(mouth, mouth_weights)[0]
    new_image['Nose'] = random.choices(nose, nose_weights)[0]
    new_image['Glasses'] = random.choices(glasses, glasses_weights)[0]
    if new_image in all_images:
        return create_new_image()
    else:
        return new_image
for i in range(total_images):
    new_trait_image = create_new_image()
    all_images.append(new_trait_image)
all_images[33] | code |
88104935/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import random
face = ['BlueF', 'BlackF', 'OrangeF', 'WhiteF']
face_weights = [2, 47, 3, 48]
eyes = ['BlueE', 'BrownE', 'GreenE', 'PurpleE', 'RedE', 'YellowE']
eye_weights = [20, 50, 20, 6, 3, 1]
hair = ['BlackdevH', 'DanH', 'DevH', 'PeteH', 'SophH']
hair_weights = [22, 25, 25, 3, 25]
mouth = ['frownM', 'indiffM', 'smileM', 'redroboM', 'blueroboM', 'zipM']
mouth_weights = [15, 25, 50, 2, 4, 4]
nose = ['DnoseN', 'PointN', 'TetnoseN']
nose_weights = [40, 55, 5]
glasses = ['leoG', 'blank']
glasses_weights = [15, 85]
total_images = 100
all_images = []
def create_new_image():
    new_image = {}
    new_image['Face'] = random.choices(face, face_weights)[0]
    new_image['Eyes'] = random.choices(eyes, eye_weights)[0]
    new_image['Hair'] = random.choices(hair, hair_weights)[0]
    new_image['Mouth'] = random.choices(mouth, mouth_weights)[0]
    new_image['Nose'] = random.choices(nose, nose_weights)[0]
    new_image['Glasses'] = random.choices(glasses, glasses_weights)[0]
    if new_image in all_images:
        return create_new_image()
    else:
        return new_image
for i in range(total_images):
    new_trait_image = create_new_image()
    all_images.append(new_trait_image)
import matplotlib.pyplot as plt  # needed for imshow/savefig; the import is missing from the original cell
for item in all_images:
    im1 = Image.open(f"../input/FacesDatanft/{item['Face']}.png").convert('RGBA')
    im2 = Image.open(f"../input/FacesDatanft/{item['Eyes']}.png").convert('RGBA')
    im4 = Image.open(f"../input/FacesDatanft/{item['Hair']}.png").convert('RGBA')
    im5 = Image.open(f"../input/FacesDatanft/{item['Mouth']}.png").convert('RGBA')
    im6 = Image.open(f"../input/FacesDatanft/{item['Nose']}.png").convert('RGBA')
    # only the face and eye layers are composited; hair, mouth and nose are loaded but unused here
    com1 = Image.alpha_composite(im1, im2)
    plt.imshow(com1)
    plt.savefig('NFT.png') | code |
88104935/cell_5 | [
"text_plain_output_1.png"
] | import random
face = ['BlueF', 'BlackF', 'OrangeF', 'WhiteF']
face_weights = [2, 47, 3, 48]
eyes = ['BlueE', 'BrownE', 'GreenE', 'PurpleE', 'RedE', 'YellowE']
eye_weights = [20, 50, 20, 6, 3, 1]
hair = ['BlackdevH', 'DanH', 'DevH', 'PeteH', 'SophH']
hair_weights = [22, 25, 25, 3, 25]
mouth = ['frownM', 'indiffM', 'smileM', 'redroboM', 'blueroboM', 'zipM']
mouth_weights = [15, 25, 50, 2, 4, 4]
nose = ['DnoseN', 'PointN', 'TetnoseN']
nose_weights = [40, 55, 5]
glasses = ['leoG', 'blank']
glasses_weights = [15, 85]
total_images = 100
all_images = []
def create_new_image():
    new_image = {}
    new_image['Face'] = random.choices(face, face_weights)[0]
    new_image['Eyes'] = random.choices(eyes, eye_weights)[0]
    new_image['Hair'] = random.choices(hair, hair_weights)[0]
    new_image['Mouth'] = random.choices(mouth, mouth_weights)[0]
    new_image['Nose'] = random.choices(nose, nose_weights)[0]
    new_image['Glasses'] = random.choices(glasses, glasses_weights)[0]
    if new_image in all_images:
        return create_new_image()
    else:
        return new_image
for i in range(total_images):
    new_trait_image = create_new_image()
    all_images.append(new_trait_image)
def all_images_unique(all_images):
    seen = list()
    return not any((i in seen or seen.append(i) for i in all_images))
print('Are all images unique?', all_images_unique(all_images))
i = 0
for item in all_images:
    item['tokenId'] = i
    i = i + 1
print(all_images) | code |
128000263/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
df_train.info() | code |
128000263/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error, r2_score, roc_curve, confusion_matrix, classification_report, accuracy_score, auc, log_loss
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
def apk(actual, predicted, k=10):
    """
    Computes the average precision at k.
    This function computes the average precision at k between two lists of
    items.
    Parameters
    ----------
    actual : list
        A list of elements that are to be predicted (order doesn't matter)
    predicted : list
        A list of predicted elements (order does matter)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The average precision at k over the input lists
    """
    if not actual:
        return 0.0
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(actual), k)
def mapk(actual, predicted, k=10):
    """
    Computes the mean average precision at k.
    This function computes the mean average precision at k between two lists
    of lists of items.
    Parameters
    ----------
    actual : list
        A list of lists of elements that are to be predicted
        (order doesn't matter in the lists)
    predicted : list
        A list of lists of predicted elements
        (order matters in the lists)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The mean average precision at k over the input lists
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
le = LabelEncoder()
df_train['prognosis_label'] = le.fit_transform(df_train['prognosis'])
X = df_train.drop(['prognosis', 'prognosis_label'], axis=1)
y = df_train.pop('prognosis_label')
params_k = {'boosting_type': 'gbdt', 'objective': 'multiclass', 'num_class': 11, 'subsample': 0.6, 'subsample_freq': 3, 'learning_rate': 0.013, 'num_leaves': 2 ** 11 - 1, 'max_bin': 150, 'n_estimators': 750, 'boost_from_average': False, 'random_seed': 42}
xgb_params = {'max_depth': 6, 'max_bin': 256, 'subsample': 0.6, 'n_estimators': 30, 'learning_rate': 0.1, 'random_state': 1995, 'colsample_bytree': 0.12, 'objective': 'multi:softprob', 'booster': 'dart'}
xgb_basic_params = {'random_state': 13, 'objective': 'multi:softprob', 'eval_metric': 'map@3', 'learning_rate': 0.1, 'n_estimators': 5000, 'max_depth': 9}
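# 5-fold stratified CV: each fold fits an XGBoost classifier and records log-loss and MAP@3
# on the train and validation splits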
skf = StratifiedKFold(n_splits=5, random_state=13, shuffle=True)
# per-fold metric accumulators (never initialised in the original cell)
train_logloss, valid_logloss, train_map3, valid_map3 = ([], [], [], [])
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train = X.iloc[train_idx]
    y_train = y.iloc[train_idx]
    X_valid = X.iloc[val_idx]
    y_valid = y.iloc[val_idx]
    model = xgb.XGBClassifier(**xgb_params)
    model.fit(X_train, y_train)
    pred_train = model.predict_proba(X_train)
    pred_valid = model.predict_proba(X_valid)
    train_score = log_loss(y_train, pred_train)
    valid_score = log_loss(y_valid, pred_valid)
    train_logloss.append(train_score)
    valid_logloss.append(valid_score)
    train_index = np.argsort(-pred_train)[:, :3]
    valid_index = np.argsort(-pred_valid)[:, :3]
    train_mapk_score = mapk(y_train.to_numpy().reshape(-1, 1), train_index, 3)
    valid_mapk_score = mapk(y_valid.to_numpy().reshape(-1, 1), valid_index, 3)
    train_map3.append(train_mapk_score)
    valid_map3.append(valid_mapk_score)
# the last fold's model is used below; take its three highest-probability classes per test row
test_predictions = model.predict_proba(df_test)
test_sorted_prediction_ids = np.argsort(-test_predictions, axis=1)
test_top_3_prediction_ids = test_sorted_prediction_ids[:, :3]
original_shape = test_top_3_prediction_ids.shape
test_top_3_predictions = le.inverse_transform(test_top_3_prediction_ids.reshape(-1, 1))
test_top_3_predictions = test_top_3_predictions.reshape(original_shape)
test_top_3_predictions[:10] | code |
128000263/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error, r2_score, roc_curve, confusion_matrix, classification_report, accuracy_score, auc, log_loss
from sklearn.model_selection import StratifiedKFold, GridSearchCV
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
def apk(actual, predicted, k=10):
    """
    Computes the average precision at k.
    This function computes the average precision at k between two lists of
    items.
    Parameters
    ----------
    actual : list
        A list of elements that are to be predicted (order doesn't matter)
    predicted : list
        A list of predicted elements (order does matter)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The average precision at k over the input lists
    """
    if not actual:
        return 0.0
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(actual), k)
def mapk(actual, predicted, k=10):
    """
    Computes the mean average precision at k.
    This function computes the mean average precision at k between two lists
    of lists of items.
    Parameters
    ----------
    actual : list
        A list of lists of elements that are to be predicted
        (order doesn't matter in the lists)
    predicted : list
        A list of lists of predicted elements
        (order matters in the lists)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The mean average precision at k over the input lists
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
# the prognosis label encoding below appears in the other cells of this notebook but was dropped from this one
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df_train['prognosis_label'] = le.fit_transform(df_train['prognosis'])
X = df_train.drop(['prognosis', 'prognosis_label'], axis=1)
y = df_train.pop('prognosis_label')
params_k = {'boosting_type': 'gbdt', 'objective': 'multiclass', 'num_class': 11, 'subsample': 0.6, 'subsample_freq': 3, 'learning_rate': 0.013, 'num_leaves': 2 ** 11 - 1, 'max_bin': 150, 'n_estimators': 750, 'boost_from_average': False, 'random_seed': 42}
xgb_params = {'max_depth': 6, 'max_bin': 256, 'subsample': 0.6, 'n_estimators': 30, 'learning_rate': 0.1, 'random_state': 1995, 'colsample_bytree': 0.12, 'objective': 'multi:softprob', 'booster': 'dart'}
xgb_basic_params = {'random_state': 13, 'objective': 'multi:softprob', 'eval_metric': 'map@3', 'learning_rate': 0.1, 'n_estimators': 5000, 'max_depth': 9}
skf = StratifiedKFold(n_splits=5, random_state=13, shuffle=True)
train_logloss, valid_logloss, train_map3, valid_map3 = ([], [], [], [])
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    print('FOLD:', fold + 1)
    X_train = X.iloc[train_idx]
    y_train = y.iloc[train_idx]
    X_valid = X.iloc[val_idx]
    y_valid = y.iloc[val_idx]
    model = xgb.XGBClassifier(**xgb_params)
    model.fit(X_train, y_train)
    pred_train = model.predict_proba(X_train)
    pred_valid = model.predict_proba(X_valid)
    train_score = log_loss(y_train, pred_train)
    valid_score = log_loss(y_valid, pred_valid)
    train_logloss.append(train_score)
    valid_logloss.append(valid_score)
    train_index = np.argsort(-pred_train)[:, :3]
    valid_index = np.argsort(-pred_valid)[:, :3]
    train_mapk_score = mapk(y_train.to_numpy().reshape(-1, 1), train_index, 3)
    valid_mapk_score = mapk(y_valid.to_numpy().reshape(-1, 1), valid_index, 3)
    train_map3.append(train_mapk_score)
    valid_map3.append(valid_mapk_score)
    print(f'Valid log_loss : {np.mean(valid_logloss):.5f} ± {np.std(valid_logloss):.5f} | Train log_loss : {np.mean(train_logloss):.5f} ± {np.std(train_logloss):.5f}')
    print(f'Valid MAP@3 Score: {np.mean(valid_map3):.5f} ± {np.std(valid_map3):.5f} | Train MAP@3 Score: {np.mean(train_map3):.5f} ± {np.std(train_map3):.5f}')
    print('') | code |
128000263/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error, r2_score, roc_curve, confusion_matrix, classification_report, accuracy_score, auc, log_loss
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
def apk(actual, predicted, k=10):
    """
    Computes the average precision at k.
    This function computes the average precision at k between two lists of
    items.
    Parameters
    ----------
    actual : list
        A list of elements that are to be predicted (order doesn't matter)
    predicted : list
        A list of predicted elements (order does matter)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The average precision at k over the input lists
    """
    if not actual:
        return 0.0
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(actual), k)
def mapk(actual, predicted, k=10):
    """
    Computes the mean average precision at k.
    This function computes the mean average precision at k between two lists
    of lists of items.
    Parameters
    ----------
    actual : list
        A list of lists of elements that are to be predicted
        (order doesn't matter in the lists)
    predicted : list
        A list of lists of predicted elements
        (order matters in the lists)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The mean average precision at k over the input lists
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
le = LabelEncoder()
df_train['prognosis_label'] = le.fit_transform(df_train['prognosis'])
X = df_train.drop(['prognosis', 'prognosis_label'], axis=1)
y = df_train.pop('prognosis_label')
params_k = {'boosting_type': 'gbdt', 'objective': 'multiclass', 'num_class': 11, 'subsample': 0.6, 'subsample_freq': 3, 'learning_rate': 0.013, 'num_leaves': 2 ** 11 - 1, 'max_bin': 150, 'n_estimators': 750, 'boost_from_average': False, 'random_seed': 42}
xgb_params = {'max_depth': 6, 'max_bin': 256, 'subsample': 0.6, 'n_estimators': 30, 'learning_rate': 0.1, 'random_state': 1995, 'colsample_bytree': 0.12, 'objective': 'multi:softprob', 'booster': 'dart'}
xgb_basic_params = {'random_state': 13, 'objective': 'multi:softprob', 'eval_metric': 'map@3', 'learning_rate': 0.1, 'n_estimators': 5000, 'max_depth': 9}
skf = StratifiedKFold(n_splits=5, random_state=13, shuffle=True)
train_logloss, valid_logloss, train_map3, valid_map3 = ([], [], [], [])
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train = X.iloc[train_idx]
    y_train = y.iloc[train_idx]
    X_valid = X.iloc[val_idx]
    y_valid = y.iloc[val_idx]
    model = xgb.XGBClassifier(**xgb_params)
    model.fit(X_train, y_train)
    pred_train = model.predict_proba(X_train)
    pred_valid = model.predict_proba(X_valid)
    train_score = log_loss(y_train, pred_train)
    valid_score = log_loss(y_valid, pred_valid)
    train_logloss.append(train_score)
    valid_logloss.append(valid_score)
    train_index = np.argsort(-pred_train)[:, :3]
    valid_index = np.argsort(-pred_valid)[:, :3]
    train_mapk_score = mapk(y_train.to_numpy().reshape(-1, 1), train_index, 3)
    valid_mapk_score = mapk(y_valid.to_numpy().reshape(-1, 1), valid_index, 3)
    train_map3.append(train_mapk_score)
    valid_map3.append(valid_mapk_score)
test_predictions = model.predict_proba(df_test)
test_sorted_prediction_ids = np.argsort(-test_predictions, axis=1)
test_top_3_prediction_ids = test_sorted_prediction_ids[:, :3]
original_shape = test_top_3_prediction_ids.shape
test_top_3_predictions = le.inverse_transform(test_top_3_prediction_ids.reshape(-1, 1))
test_top_3_predictions = test_top_3_predictions.reshape(original_shape)
test_top_3_predictions[:10]
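# join the top-3 predicted prognoses into one space-separated string per row, as the MAP@3 submission format expects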
df_submission['prognosis'] = np.apply_along_axis(lambda x: np.array(' '.join(x), dtype='object'), 1, test_top_3_predictions)
df_submission['prognosis'][:10] | code |
128000263/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
print('count of the classes prognosis:', df_train['prognosis'].unique().shape[0]) | code |
128000263/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
import matplotlib.pyplot as plt
import lightgbm as lgb
import optuna
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.metrics import mean_absolute_error, r2_score, roc_curve, confusion_matrix, classification_report, accuracy_score, auc, log_loss
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128000263/cell_8 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
df_train.tail() | code |
128000263/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv').drop(['id'], axis=1)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv').drop(['id'], axis=1)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
df_train_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_test_real = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_real = pd.concat([df_train_real, df_test_real])
df_train = pd.concat([df_train, df_real])
df_train = df_train.drop_duplicates()
le = LabelEncoder()
df_train['prognosis_label'] = le.fit_transform(df_train['prognosis'])
print(le.classes_) | code |
320335/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/train.csv', usecols=['Producto_ID', 'Demanda_uni_equil'])
train_data['log_Dem'] = np.log(np.array(train_data['Demanda_uni_equil'].tolist()) + 1)
mean_data = train_data.groupby(train_data['Producto_ID']).mean()
test_data = pd.read_csv('../input/test.csv', usecols=['id', 'Producto_ID'])
target = np.zeros(test_data.shape[0])
log_target = np.zeros(test_data.shape[0])
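# spread each product's mean demand (and mean log-demand) over the test rows that share its Producto_ID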
for pid in mean_data.index:
    target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['Demanda_uni_equil']
    log_target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['log_Dem']
test_data['Demanda_uni_equil'] = np.exp(log_target) - 1
test_data.to_csv('result_groupmean_log.csv', index=False, columns=['id', 'Demanda_uni_equil'])
test_data[test_data['Producto_ID'] == 41]['id'] | code |
320335/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/train.csv', usecols=['Producto_ID', 'Demanda_uni_equil'])
train_data['log_Dem'] = np.log(np.array(train_data['Demanda_uni_equil'].tolist()) + 1)
mean_data = train_data.groupby(train_data['Producto_ID']).mean()
test_data = pd.read_csv('../input/test.csv', usecols=['id', 'Producto_ID'])
target = np.zeros(test_data.shape[0])
log_target = np.zeros(test_data.shape[0])
for pid in mean_data.index:
    target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['Demanda_uni_equil']
    log_target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['log_Dem']
test_data['Demanda_uni_equil'] = np.exp(log_target) - 1
test_data.to_csv('result_groupmean_log.csv', index=False, columns=['id', 'Demanda_uni_equil'])
test_data.shape | code |
320335/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/train.csv', usecols=['Producto_ID', 'Demanda_uni_equil'])
mean_data = train_data.groupby(train_data['Producto_ID']).mean()
print(mean_data) | code |
320335/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/train.csv', usecols=['Producto_ID', 'Demanda_uni_equil'])
train_data['log_Dem'] = np.log(np.array(train_data['Demanda_uni_equil'].tolist()) + 1)
mean_data = train_data.groupby(train_data['Producto_ID']).mean()
test_data = pd.read_csv('../input/test.csv', usecols=['id', 'Producto_ID'])
target = np.zeros(test_data.shape[0])
log_target = np.zeros(test_data.shape[0])
for pid in mean_data.index:
    target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['Demanda_uni_equil']
    log_target[test_data[test_data['Producto_ID'] == pid]['id'].values] = mean_data.ix[pid]['log_Dem']
test_data['Demanda_uni_equil'] = np.exp(log_target) - 1
print(test_data)
test_data.to_csv('result_groupmean_log.csv', index=False, columns=['id', 'Demanda_uni_equil']) | code |
312349/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30') | code |
312349/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
for i, age_group in enumerate(age_groups):
    print(age_group)
    print(df[df.data_field == age_group].value)
    print('') | code |
312349/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
312349/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
symptoms = ['confirmed_fever', 'confirmed_acute_fever', 'confirmed_arthralgia', 'confirmed_arthritis', 'confirmed_rash', 'confirmed_conjunctivitis', 'confirmed_eyepain', 'confirmed_headache', 'confirmed_malaise']
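# overlay the reported values for each confirmed-symptom field on a single set of axes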
fig = plt.figure(figsize=(13, 13))
for symptom in symptoms:
    df[df.data_field == symptom].value.plot()
plt.legend(symptoms, loc='best')
plt.title('Understanding symptoms of zika virus') | code |
312349/cell_3 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3) | code |
312349/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases') | code |
333414/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues
lm.rsquared
lm = smf.ols(formula='Sales~ TV + Radio + Newspaper', data=data).fit()
lm.params
lm.summary()
lm = smf.ols(formula='Sales ~ TV + Radio', data=data).fit()
lm.summary() | code |
333414/cell_9 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues | code |
333414/cell_4 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params | code |
333414/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame) | code |
333414/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape | code |
333414/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues
lm.rsquared
lm = smf.ols(formula='Sales~ TV + Radio + Newspaper', data=data).fit()
lm.params | code |
333414/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.head() | code |
333414/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
data.plot(kind='scatter', x='TV', y='Sales')
plt.plot(frame, preds, c='red', linewidth=2) | code |
333414/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int() | code |
333414/cell_3 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
fig, axs = plt.subplots(1, 3, sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12, 4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2]) | code |
333414/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues
lm.rsquared
lm = smf.ols(formula='Sales~ TV + Radio + Newspaper', data=data).fit()
lm.params
lm.summary()
lm = smf.ols(formula='Sales ~ TV + Radio', data=data).fit()
lm.summary()
data2 = pd.DataFrame(data[['Radio', 'TV', 'Sales']])
data2['Predicted'] = lm.predict(data2)
data2['Residuals'] = data2['Sales'] - data2['Predicted']
data2.plot(kind='scatter', x='Predicted', y='Residuals')
data2.plot(kind='scatter', x='TV', y='Residuals') | code |
333414/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues
lm.rsquared | code |
333414/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.shape
#Check assumption 2
#Testing the assumptions of a Linear Relationship between the independent and dependent varaible (s)
#If the relationship between the Independent Variable (IV) and Dependent Varaible (DV) is not linear, the results of regression will under-estimate the true relationship
#This under-estimation can present 2 major problems
#1.) an increased chance of a Type II error for that IV
#2.) and with multiple regression an increases risk of Type I errors (over-estimation) for other IVs that share variance with that IV
#How to test for linearity:
#1.)Scatterplots
fig, axs=plt.subplots(1,3,sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(12,4))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales~TV', data=data).fit()
lm.params
frame = pd.DataFrame({'TV': [50]})
lm.predict(frame)
frame = pd.DataFrame(data.TV)
preds = lm.predict(frame)
lm.conf_int()
lm.pvalues
lm.rsquared
lm = smf.ols(formula='Sales~ TV + Radio + Newspaper', data=data).fit()
lm.params
lm.summary() | code |
333414/cell_5 | [
"text_plain_output_1.png"
] | # manual prediction from the fitted Sales~TV line: intercept + slope * 50 units of TV spend (≈ 9.41)
7.032594 + 0.047537 * 50 | code |
50229416/cell_13 | [
"text_html_output_1.png"
] | from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
dtc = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(dtc, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
print(f'Best parameters: {best_params}')
dtc_best = DecisionTreeClassifier(**best_params)
dtc_best.fit(X_train, y_train)
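# NOTE (assumption): print_score is not defined in this cell. The helper below is a
# minimal sketch so the calls further down can run standalone; the original notebook
# presumably defines its own version elsewhere.
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

def print_score(clf, X_train, y_train, X_test, y_test, train=True):
    # Report accuracy, classification report and confusion matrix for the chosen split.
    X, y, label = (X_train, y_train, 'Train') if train else (X_test, y_test, 'Test')
    pred = clf.predict(X)
    print(f'{label} accuracy: {accuracy_score(y, pred):.4f}')
    print(classification_report(y, pred))
    print(confusion_matrix(y, pred))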
print_score(dtc_best, X_train, y_train, X_test, y_test, train=True)
print_score(dtc_best, X_train, y_train, X_test, y_test, train=False) | code |
50229416/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
lr = LabelEncoder()
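# Encode every object-typed column to integer codes; a single LabelEncoder instance
# is simply refit on each column in turn.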
for i in categorial_col:
    df[i] = lr.fit_transform(df[i])
df[categorial_col.columns].head() | code |
50229416/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum() | code |
50229416/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
df.describe() | code |
50229416/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df.head() | code |
50229416/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape | code |
50229416/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50229416/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
categorial_col.head() | code |
50229416/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(30, 30))
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', annot_kws={'size': 15}) | code |
50229416/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve,confusion_matrix, f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
dtc = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(dtc, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
dtc_best = DecisionTreeClassifier(**best_params)
dtc_best.fit(X_train, y_train)
y_train_pred = dtc_best.predict(X_train)
y_train_prob = dtc_best.predict_proba(X_train)[:, 1]  # probability of the positive class for every training row
y_test_pred = dtc_best.predict(X_test)
y_test_prob = dtc_best.predict_proba(X_test)[:, 1]
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, confusion_matrix, f1_score
accuracy_score(y_train, y_train_pred) | code |
50229416/cell_16 | [
"text_html_output_1.png"
] | from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve,confusion_matrix, f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
dtc = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(dtc, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
dtc_best = DecisionTreeClassifier(**best_params)
dtc_best.fit(X_train, y_train)
y_train_pred = dtc_best.predict(X_train)
y_train_prob = dtc_best.predict_proba(X_train)[:, 1]
y_test_pred = dtc_best.predict(X_test)
y_test_prob = dtc_best.predict_proba(X_test)[:, 1]
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, confusion_matrix, f1_score
accuracy_score(y_train, y_train_pred)
accuracy_score(y_test, y_test_pred) | code |
50229416/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve,confusion_matrix, f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
dtc = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(dtc, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
dtc_best = DecisionTreeClassifier(**best_params)
dtc_best.fit(X_train, y_train)
y_train_pred = dtc_best.predict(X_train)
y_train_prob = dtc_best.predict_proba(X_train)[:, 1]
y_test_pred = dtc_best.predict(X_test)
y_test_prob = dtc_best.predict_proba(X_test)[:, 1]
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, confusion_matrix, f1_score
accuracy_score(y_train, y_train_pred)
roc_auc_score(y_test, y_test_prob) | code |
50229416/cell_14 | [
"text_html_output_1.png"
] | from IPython.display import Image
from io import StringIO
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pydot
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
dtc = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(dtc, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
dtc_best = DecisionTreeClassifier(**best_params)
dtc_best.fit(X_train, y_train)
from IPython.display import Image
from io import StringIO
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns)
features.remove('Attrition')
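# Render the tuned tree: write DOT text to an in-memory buffer, build a pydot graph
# from it, and display the resulting PNG inline.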
dot_data = StringIO()
export_graphviz(dtc_best, out_file=dot_data, feature_names=features, filled=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png()) | code |
50229416/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts() | code |
50229416/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
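# NOTE (assumption): print_score is not defined in this cell; see the minimal helper
# sketched in 50229416/cell_13 above for one possible implementation.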
print_score(dtc, X_train, y_train, X_test, y_test, train=True)
print_score(dtc, X_train, y_train, X_test, y_test, train=False) | code |
50229416/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any() | code |
88086811/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes | code |
88086811/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Generation.dtypes
dF_Generation.isnull().sum()
dF_Generation.isnull().sum()
Generation_Başlangıç = dF_Generation['DateTime'].min()
Generation_Başlangıç | code |
88086811/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.isnull().sum() | code |
88086811/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample.dtypes
dF_Sample.isnull().sum() | code |
88086811/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.isnull().sum()
dF_Temperature = dF_Temperature.ffill(axis=0)
dF_Temperature.isnull().sum()
dF_Temperature['DateTime'].max() | code |
88086811/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.dtypes | code |
88086811/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.head() | code |
88086811/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.isnull().sum()
dF_Temperature = dF_Temperature.ffill(axis=0)
dF_Temperature.isnull().sum() | code |
88086811/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature['DateTime'] = pd.to_datetime(dF_Temperature['DateTime'])
dF_Temperature.dtypes
dF_Generation.dtypes
dF_Sample.dtypes
dF_Temperature.dtypes
dF_Temperature.isnull().sum()
dF_Generation.isnull().sum()
dF_Temperature = dF_Temperature.ffill(axis=0)
dF_Generation.isnull().sum()
dF_Temperature.isnull().sum()
dF_Sample.isnull().sum()
Generation_Başlangıç = dF_Generation['DateTime'].min()
Generation_Başlangıç
Generation_Bitiş = dF_Generation['DateTime'].max()
Generation_Bitiş
Sample_Başlangıç = dF_Sample['DateTime'].min()
Sample_Bitiş = dF_Sample['DateTime'].max()
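# NOTE (assumption): pd.date_range defaults to daily frequency, so the isin() filters
# below keep only timestamps that fall exactly on midnight; if the data is hourly,
# DateTime.between(start, end) would keep every row in the window instead.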
dF_Temp_G = dF_Temperature[dF_Temperature['DateTime'].isin(pd.date_range(Generation_Başlangıç, Generation_Bitiş))]
dF_Temp_S = dF_Temperature[dF_Temperature['DateTime'].isin(pd.date_range(Sample_Başlangıç, Sample_Bitiş))]
dF_Gen_Temp = pd.merge(dF_Temp_G, dF_Generation, on='DateTime', how='inner')  # join generation-period temperature with generation on DateTime | code
88086811/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.head() | code |
88086811/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88086811/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes | code |
88086811/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.dtypes
dF_Temperature.isnull().sum()
dF_Temperature = dF_Temperature.ffill(axis=0)
dF_Temperature.isnull().sum()
dF_Temperature['DateTime'].min() | code |
88086811/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Generation.dtypes
dF_Generation.isnull().sum()
dF_Generation.isnull().sum() | code |
88086811/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample.head() | code |
88086811/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample['DateTime'] = pd.to_datetime(dF_Sample['DateTime'])
dF_Generation['DateTime'] = pd.to_datetime(dF_Generation['DateTime'])
dF_Temperature = pd.read_csv('../input/enerjisa-enerji-veri-maraton/temperature.csv', sep=';')
dF_Temperature.dtypes
dF_Temperature.dtypes | code |
88086811/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Generation.dtypes | code |
88086811/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Sample = pd.read_csv('../input/enerjisa-enerji-veri-maraton/sample_submission.csv')
dF_Sample.dtypes | code |
88086811/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Generation.dtypes
dF_Generation.isnull().sum()
dF_Generation.isnull().sum()
Generation_Bitiş = dF_Generation['DateTime'].max()
Generation_Bitiş | code |
88086811/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dF_Generation = pd.read_csv('../input/enerjisa-enerji-veri-maraton/generation.csv', sep=';')
dF_Generation.columns = ['DateTime', 'Generation']
dF_Generation.dtypes
dF_Generation.dtypes
dF_Generation.isnull().sum() | code |
34147803/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
df = pd.read_csv('../input/programs-broadcast-by-disney-csv/Kids TV Data.csv')
df['First Aired'].value_counts().plot(kind='bar') | code |
34147803/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
df = pd.read_csv('../input/programs-broadcast-by-disney-csv/Kids TV Data.csv')
df.head() | code |
34147803/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34147803/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
df = pd.read_csv('../input/programs-broadcast-by-disney-csv/Kids TV Data.csv')
df['Series Type'].value_counts().plot(kind='bar') | code |
105216483/cell_13 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
    nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
    nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
    odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
    odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
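# Impute the remaining missing usage values by sorting on a closely related column
# (within location_code where available) and forward-filling, so each gap is filled
# from a neighbouring, similar row rather than a global statistic.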
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
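# Reuse the data_path_* names to hold the cleaned DataFrames for the modelling step below.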
rs = 42
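# Candidate classifiers to compare; the last two wrap XGBoost and gradient boosting in bagging ensembles.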
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
dataset_train['Churn'].value_counts() | code |
105216483/cell_9 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
    nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
    nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
    odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
    odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
dataset_train.describe() | code |
105216483/cell_23 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
    nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
    nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
    odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
    odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
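# Consistency fix: rows with international minutes and charge but zero calls get at least one call.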
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
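# Correct a few extreme values that look like data-entry errors (misplaced decimal points).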
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
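# Candidate classifiers to compare: trees, kNN, naive Bayes, ensembles, SVMs, XGBoost and logistic regression.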
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
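# Cross-validate every candidate model on the F1 score and report the mean and standard deviation per model.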
def evaluate_for_models(models, X, y):
results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []})
for model in models:
score = cross_val_score(model, X, y, scoring='f1')
new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
results = pd.concat([results, pd.DataFrame([new_result])], ignore_index=True)
return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])
dataset_train = data_path_train
dataset_test = data_path_test
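# Handle class imbalance by oversampling: churned customers are replicated three times, then the frame is shuffled.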
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
new_dataset_train = pd.concat([new_dataset_train, churn_dataset_train])
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts()
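# One-hot encode location_code and map the yes/no columns (and Churn in the training set) to 1/0.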
encoded_train = pd.get_dummies(dataset_train, columns=['location_code'])
encoded_test = pd.get_dummies(dataset_test, columns=['location_code'])
encoded_train['Churn'] = encoded_train['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0})
for col in ['intertiol_plan', 'voice_mail_plan']:
encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0})
encoded_train.tail() | code |
105216483/cell_30 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
def evaluate_for_models(models, X, y):
results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []})
for model in models:
score = cross_val_score(model, X, y, scoring='f1')
new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
results = pd.concat([results, pd.DataFrame([new_result])], ignore_index=True)
return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])
dataset_train = data_path_train
dataset_test = data_path_test
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
new_dataset_train = pd.concat([new_dataset_train, churn_dataset_train])
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts()
encoded_train = pd.get_dummies(dataset_train, columns=['location_code'])
encoded_test = pd.get_dummies(dataset_test, columns=['location_code'])
encoded_train['Churn'] = encoded_train['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0})
for col in ['intertiol_plan', 'voice_mail_plan']:
encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0})
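# Split features and target, then prepare standard-scaled and min-max-scaled copies of the features for comparison.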
X = encoded_train.drop(columns=['Churn'])
y = encoded_train.Churn
scaler = StandardScaler()
stdscaled = X.copy(deep=True)
stdscaled[stdscaled.columns] = scaler.fit_transform(stdscaled[stdscaled.columns])
scaler = MinMaxScaler()
minscaled = X.copy(deep=True)
minscaled[minscaled.columns] = scaler.fit_transform(minscaled[minscaled.columns])
evaluate_for_models(models, X, y) | code |
105216483/cell_11 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
dataset_train.describe() | code |
105216483/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
dataset_train = data_path_train
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
new_dataset_train = pd.concat([new_dataset_train, churn_dataset_train])
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts() | code |
105216483/cell_28 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_train['total_day_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_train['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_train['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_train['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_train['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_train['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_train['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_train['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_train['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_train = odm_handled_dataset_train.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_train['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_charge'])
odm_handled_dataset_test['total_day_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_day_min'])
odm_handled_dataset_test['total_day_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_day_min', 'total_day_charge'])
odm_handled_dataset_test['total_day_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_min'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_eve_min'])
odm_handled_dataset_test['total_eve_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_eve_min', 'total_eve_charge'])
odm_handled_dataset_test['total_eve_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_charge'])
odm_handled_dataset_test['total_night_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_night_minutes'])
odm_handled_dataset_test['total_night_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_night_minutes', 'total_night_charge'])
odm_handled_dataset_test['total_night_calls'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_minutes'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['location_code', 'total_intl_minutes'])
odm_handled_dataset_test['total_intl_charge'].ffill(inplace=True)
odm_handled_dataset_test = odm_handled_dataset_test.sort_values(['total_intl_minutes', 'total_intl_charge'])
odm_handled_dataset_test['total_intl_calls'].ffill(inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
rs = 42
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs), SVC(max_iter=10000), LinearSVC(max_iter=10000), XGBClassifier(eval_metric='logloss', use_label_encoder=False), LogisticRegression(), GradientBoostingClassifier(random_state=rs), BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs), BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs)]
def evaluate_for_models(models, X, y):
results = pd.DataFrame({'Model': [], 'ScoreMean': [], 'Score Standard Deviation': []})
for model in models:
score = cross_val_score(model, X, y, scoring='f1')
new_result = {'Model': model.__class__.__name__, 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
results = pd.concat([results, pd.DataFrame([new_result])], ignore_index=True)
return results.sort_values(by=['ScoreMean', 'Score Standard Deviation'])
dataset_train = data_path_train
dataset_test = data_path_test
churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'Yes']
not_churn_dataset_train = dataset_train.loc[dataset_train.Churn == 'No']
new_dataset_train = not_churn_dataset_train.copy(deep=True)
for i in range(3):
new_dataset_train = pd.concat([new_dataset_train, churn_dataset_train])
new_dataset_train
dataset_train = new_dataset_train.sample(frac=1, random_state=42)
dataset_train['Churn'].value_counts()
encoded_train = pd.get_dummies(dataset_train, columns=['location_code'])
encoded_test = pd.get_dummies(dataset_test, columns=['location_code'])
encoded_train['Churn'] = encoded_train['Churn'].str.lower()
for col in ['intertiol_plan', 'voice_mail_plan', 'Churn']:
encoded_train[col] = encoded_train[col].map({'yes': 1, 'no': 0})
for col in ['intertiol_plan', 'voice_mail_plan']:
encoded_test[col] = encoded_test[col].map({'yes': 1, 'no': 0})
X = encoded_train.drop(columns=['Churn'])
y = encoded_train.Churn
scaler = StandardScaler()
stdscaled = X.copy(deep=True)
stdscaled[stdscaled.columns] = scaler.fit_transform(stdscaled[stdscaled.columns])
scaler = MinMaxScaler()
minscaled = X.copy(deep=True)
minscaled[minscaled.columns] = scaler.fit_transform(minscaled[minscaled.columns])
minscaled.head() | code |
105216483/cell_8 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
data_path_train = '/kaggle/input/cs-3110-mini-project/train.csv'
data_path_test = '/kaggle/input/cs-3110-mini-project/test.csv'
dataset_train = pd.read_csv(data_path_train)
dataset_test = pd.read_csv(data_path_test)
dataset_train.drop(['Unnamed: 20'], axis=1, inplace=True)
dataset_test.drop(['Unnamed: 19', 'Unnamed: 20'], axis=1, inplace=True)
subset_train = dataset_train.columns.drop('customer_id')
duplicates_droped_dataset_train = dataset_train.drop_duplicates(subset=subset_train)
subset_test = dataset_test.columns.drop('customer_id')
duplicates_droped_dataset_test = dataset_test.drop_duplicates(subset=subset_test)
nan_added_dataset_train = duplicates_droped_dataset_train.copy()
nan_added_dataset_test = duplicates_droped_dataset_test.copy()
nan_added_dataset_train.loc[(nan_added_dataset_train['total_day_min'] < 0) & (nan_added_dataset_train['total_day_calls'] < 0) & (nan_added_dataset_train['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
nan_added_dataset_test.loc[(nan_added_dataset_test['total_day_min'] < 0) & (nan_added_dataset_test['total_day_calls'] < 0) & (nan_added_dataset_test['total_day_charge'] < 0), ('total_day_min', 'total_day_calls', 'total_day_charge')] = 0
for col in ['total_day_min', 'total_day_calls', 'total_day_charge', 'total_eve_min', 'total_eve_calls', 'total_eve_charge', 'total_night_minutes', 'total_night_calls', 'total_night_charge', 'total_intl_minutes', 'total_intl_calls']:
nan_added_dataset_train[col] = nan_added_dataset_train[col].abs()
nan_added_dataset_test[col] = nan_added_dataset_test[col].abs()
odm_handled_dataset_train = nan_added_dataset_train.copy()
odm_handled_dataset_test = nan_added_dataset_test.copy()
for col in ['account_length', 'location_code']:
odm_handled_dataset_train[col].fillna(odm_handled_dataset_train[col].median(), inplace=True)
odm_handled_dataset_test[col].fillna(odm_handled_dataset_test[col].median(), inplace=True)
odm_handled_dataset_train.loc[odm_handled_dataset_train['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_test.loc[odm_handled_dataset_test['intertiol_plan'].isnull(), 'intertiol_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] == 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['number_vm_messages'] != 0) & odm_handled_dataset_train.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] == 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'no'
odm_handled_dataset_test.loc[(odm_handled_dataset_test['number_vm_messages'] != 0) & odm_handled_dataset_test.voice_mail_plan.isnull(), 'voice_mail_plan'] = 'yes'
odm_handled_dataset_train.loc[(odm_handled_dataset_train['voice_mail_plan'] == 'no') & (odm_handled_dataset_train['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test['voice_mail_plan'] == 'no') & (odm_handled_dataset_test['number_vm_messages'] != 0), 'number_vm_messages'] = 0
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'yes') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_train.loc[odm_handled_dataset_train.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_train.loc[(odm_handled_dataset_train.voice_mail_plan == 'no') & odm_handled_dataset_train.number_vm_messages.isnull(), 'number_vm_messages'] = 0
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'yes') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = odm_handled_dataset_test.loc[odm_handled_dataset_test.voice_mail_plan == 'yes', 'number_vm_messages'].median()
odm_handled_dataset_test.loc[(odm_handled_dataset_test.voice_mail_plan == 'no') & odm_handled_dataset_test.number_vm_messages.isnull(), 'number_vm_messages'] = 0
# Impute the remaining usage columns by forward-filling after sorting on related
# columns, so a missing value is copied from the preceding (most similar) row.
usage_groups = [
    ('total_day_min', 'total_day_charge', 'total_day_calls'),
    ('total_eve_min', 'total_eve_charge', 'total_eve_calls'),
    ('total_night_minutes', 'total_night_charge', 'total_night_calls'),
    ('total_intl_minutes', 'total_intl_charge', 'total_intl_calls'),
]

def ffill_usage_columns(df):
    # For each group: fill minutes after sorting on charge, charge after sorting
    # on minutes, and calls after sorting on both minutes and charge.
    for min_col, charge_col, calls_col in usage_groups:
        df = df.sort_values(['location_code', charge_col])
        df[min_col] = df[min_col].ffill()
        df = df.sort_values(['location_code', min_col])
        df[charge_col] = df[charge_col].ffill()
        df = df.sort_values([min_col, charge_col])
        df[calls_col] = df[calls_col].ffill()
    return df

odm_handled_dataset_train = ffill_usage_columns(odm_handled_dataset_train)
odm_handled_dataset_test = ffill_usage_columns(odm_handled_dataset_test)
# Fill customer_service_calls with the median and drop training rows with no target label.
odm_handled_dataset_train['customer_service_calls'].fillna(odm_handled_dataset_train['customer_service_calls'].median(), inplace=True)
odm_handled_dataset_train.dropna(subset=['Churn'], inplace=True)
odm_handled_dataset_test['customer_service_calls'].fillna(odm_handled_dataset_test['customer_service_calls'].median(), inplace=True)
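# Non-zero international minutes and charge imply at least one international call.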
odm_handled_dataset_train.loc[(odm_handled_dataset_train['total_intl_calls'] == 0) & (odm_handled_dataset_train['total_intl_minutes'] > 0) & (odm_handled_dataset_train['total_intl_charge'] > 0), 'total_intl_calls'] = 1
odm_handled_dataset_test.loc[(odm_handled_dataset_test['total_intl_calls'] == 0) & (odm_handled_dataset_test['total_intl_minutes'] > 0) & (odm_handled_dataset_test['total_intl_charge'] > 0), 'total_intl_calls'] = 1
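# Restore the original row order after all the sort-based imputation.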
odm_handled_dataset_train = odm_handled_dataset_train.sort_index()
odm_handled_dataset_test = odm_handled_dataset_test.sort_index()
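# Correct a few values that look like decimal-point data-entry errors.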
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_min == 2283.9, 'total_day_min'] = 283.9
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_eve_min == 5186.4, 'total_eve_min'] = 186.4
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_minutes == 19700.0, 'total_night_minutes'] = 197.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_day_calls == 10700.0, 'total_day_calls'] = 107.0
odm_handled_dataset_train.loc[odm_handled_dataset_train.total_night_charge == 900.15, 'total_night_charge'] = 9.15
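# Aliases for the fully pre-processed train and test frames used downstream.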
pre_processed_dataset_train = odm_handled_dataset_train
pre_processed_dataset_test = odm_handled_dataset_test
data_path_train = pre_processed_dataset_train
data_path_test = pre_processed_dataset_test
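# Fixed random seed so the model comparison is reproducible.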
rs = 42
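# NOTE: assumed imports for the classifiers below; they may simply duplicate
# imports already made in an earlier cell of this notebook.
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, BaggingClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
# Candidate classifiers to compare on the cleaned data.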
models = [
    DecisionTreeClassifier(random_state=rs),
    KNeighborsClassifier(),
    GaussianNB(),
    RandomForestClassifier(max_depth=40, n_estimators=1050, random_state=rs),
    SVC(max_iter=10000),
    LinearSVC(max_iter=10000),
    XGBClassifier(eval_metric='logloss', use_label_encoder=False),
    LogisticRegression(),
    GradientBoostingClassifier(random_state=rs),
    BaggingClassifier(XGBClassifier(eval_metric='logloss', use_label_encoder=False), random_state=rs),
    BaggingClassifier(GradientBoostingClassifier(random_state=rs), random_state=rs),
]
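# Rebind dataset_train to the pre-processed training frame and preview it.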
dataset_train = data_path_train
dataset_train.head() | code |