path: stringlengths 13..17
screenshot_names: sequencelengths 1..873
code: stringlengths 0..40.4k
cell_type: stringclasses (1 value)
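The four fields above describe one record per notebook cell: a path of 13 to 17 characters, a screenshot_names list of 1 to 873 file names, a code string of up to 40.4k characters, and a cell_type that takes a single value ('code'). A minimal sketch of loading and sanity-checking such records with pandas, assuming the dump is available as JSON Lines (the file name 'cells.jsonl' is hypothetical):

import pandas as pd

# Hypothetical file name and format; the actual storage of this dump is not specified.
df = pd.read_json('cells.jsonl', lines=True)

# Verify the schema summarized in the header above.
assert {'path', 'screenshot_names', 'code', 'cell_type'} <= set(df.columns)
print(df['path'].str.len().agg(['min', 'max']))              # expect 13 / 17
print(df['screenshot_names'].str.len().agg(['min', 'max']))  # expect 1 / 873
print(df['code'].str.len().max())                            # expect up to ~40.4k
print(df['cell_type'].unique())                              # expect ['code']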
33118743/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)

def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
    train = []
    test = []
    with open(file_path + name, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape != im_out.shape:
        return None
    for im in all_im['train']:
        im_in = np.array(im['input'])
        im_out = np.array(im['output'])
        mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
        train.append((im_in, im_out, mask))
    if mode == 'train':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            im_out = np.array(im['output'])
            test.append((im_in, im_out))
    if mode == 'test':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            test.append(im_in)
    return (train, test)

train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)

def get_features(input_):
    im_in, im_out, mask = input_
    features = np.zeros((sum(sum(mask)), 8))
    colors = np.zeros(sum(sum(mask)))
    f = 0
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if mask[y, x] == 1:
                pix_exp = np.zeros(8)
                n_p = 0
                for dy in range(-1, 2):
                    for dx in range(-1, 2):
                        if dy != 0 or dx != 0:
                            if dx + x >= 0 and dy + y >= 0 and (dx + x < mask.shape[1]) and (dy + y < mask.shape[0]):
                                pix_exp[n_p] = im_in[y + dy, x + dx]
                            else:
                                pix_exp[n_p] = -1
                            n_p += 1
                features[f] = pix_exp
                colors[f] = im_out[y, x]
                f += 1
    return (features, colors)

def get_cf(train):
    features_set = []
    colors_set = []
    for in_out_mask in train:
        features, colors = get_features(in_out_mask)
        features_set += list(features)
        colors_set += list(colors)
    features_set_min = np.unique(np.array(features_set), axis=0)
    colors_min = np.zeros(len(features_set_min))
    for n, feature in enumerate(features_set):
        if feature in features_set_min:
            for i, feature_uniq in enumerate(features_set_min):
                if str(feature_uniq) == str(feature):
                    break
            colors_min[i] = colors_set[n]
    return (colors_min, features_set_min)

colors_min, features_set_min = get_cf(train)
print(colors_min, '\n')
print(features_set_min)
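# Note: the mask trick above relies on 0/0 -> nan for unchanged pixels and
# d/d -> 1.0 for changed ones, e.g. for im_in=[1, 3] and im_out=[1, 5] the
# expression (im_in - im_out) / (im_in - im_out) is [nan, 1.0], and
# np.nan_to_num turns it into [0, 1], a changed-pixel mask (the division by
# zero is what emits the RuntimeWarnings captured in the stderr outputs).
# Each changed pixel is then described by its 8 neighbours (out-of-bounds
# neighbours encoded as -1), and get_cf maps each unique neighbourhood to
# the output colour it produced.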
code
33118743/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)

def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
    train = []
    test = []
    with open(file_path + name, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape != im_out.shape:
        return None
    for im in all_im['train']:
        im_in = np.array(im['input'])
        im_out = np.array(im['output'])
        mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
        train.append((im_in, im_out, mask))
    if mode == 'train':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            im_out = np.array(im['output'])
            test.append((im_in, im_out))
    if mode == 'test':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            test.append(im_in)
    return (train, test)

train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)
test[0][0]
code
33118743/cell_3
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)
print('Same:', len(same_shape), 'All:', len(os.listdir(evaluation_path)))
code
33118743/cell_10
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)

def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
    train = []
    test = []
    with open(file_path + name, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape != im_out.shape:
        return None
    for im in all_im['train']:
        im_in = np.array(im['input'])
        im_out = np.array(im['output'])
        mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
        train.append((im_in, im_out, mask))
    if mode == 'train':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            im_out = np.array(im['output'])
            test.append((im_in, im_out))
    if mode == 'test':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            test.append(im_in)
    return (train, test)

train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)

def get_features(input_):
    im_in, im_out, mask = input_
    features = np.zeros((sum(sum(mask)), 8))
    colors = np.zeros(sum(sum(mask)))
    f = 0
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if mask[y, x] == 1:
                pix_exp = np.zeros(8)
                n_p = 0
                for dy in range(-1, 2):
                    for dx in range(-1, 2):
                        if dy != 0 or dx != 0:
                            if dx + x >= 0 and dy + y >= 0 and (dx + x < mask.shape[1]) and (dy + y < mask.shape[0]):
                                pix_exp[n_p] = im_in[y + dy, x + dx]
                            else:
                                pix_exp[n_p] = -1
                            n_p += 1
                features[f] = pix_exp
                colors[f] = im_out[y, x]
                f += 1
    return (features, colors)

def get_cf(train):
    features_set = []
    colors_set = []
    for in_out_mask in train:
        features, colors = get_features(in_out_mask)
        features_set += list(features)
        colors_set += list(colors)
    features_set_min = np.unique(np.array(features_set), axis=0)
    colors_min = np.zeros(len(features_set_min))
    for n, feature in enumerate(features_set):
        if feature in features_set_min:
            for i, feature_uniq in enumerate(features_set_min):
                if str(feature_uniq) == str(feature):
                    break
            colors_min[i] = colors_set[n]
    return (colors_min, features_set_min)

colors_min, features_set_min = get_cf(train)

def make_pred(im_in, features, colors):
    im_out = im_in.copy()
    f = 0
    for y in range(im_in.shape[0]):
        for x in range(im_in.shape[1]):
            pix_exp = np.zeros(8)
            n_p = 0
            for dy in range(-1, 2):
                for dx in range(-1, 2):
                    if dy != 0 or dx != 0:
                        if dx + x >= 0 and dy + y >= 0 and (dx + x < im_in.shape[1]) and (dy + y < im_in.shape[0]):
                            pix_exp[n_p] = im_in[y + dy, x + dx]
                        else:
                            pix_exp[n_p] = -1
                        n_p += 1
            for n, f in enumerate(features):
                if str(f) == str(pix_exp):
                    im_out[y, x] = colors[n]
    return im_out

pred = make_pred(test[0][0], features_set_min, colors_min)

er = []
for N in tqdm(range(len(same_shape[:2]))):
    data = get_im_with_same_ioshape(evaluation_path, same_shape[N])
    if data != None:
        train, test = data
        colors, features = get_cf(train)
        pred = make_pred(test[0], features, colors)
        d = np.sum(np.where(np.nan_to_num((pred - test[1]) / (pred - test[1]), 0) != 0, 1, 0))
        er.append(d)
        if d == 0:
            print('Uhu!!!')
er.sort()
print(er)
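# Note: the same 0/0 -> nan trick is reused to count mismatching pixels
# between the prediction and the expected output; d == 0 means the task was
# solved pixel-perfectly, and er collects the per-task error counts.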
code
88095138/cell_9
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
code
88095138/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded_inversed = le.inverse_transform(test_preds_decoded)
test_preds_decoded_inversed
code
88095138/cell_23
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded
code
88095138/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
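# Note: num_outputs=10 with a softmax head matches the 10 one-hot target
# columns built above, and categorical_crossentropy is the matching loss.
# The DataLoaders were already built with batch_size=64 * 2, so the
# batch_size=64 passed to fit is most likely redundant here.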
code
88095138/cell_1
[ "text_plain_output_1.png" ]
!pip install pyradox-tabular -q
import pandas as pd
import numpy as np
import sklearn
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
code
88095138/cell_28
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
sub = pd.read_csv('../input/tabular-playground-series-feb-2022/sample_submission.csv')
sub.groupby(['target']).count()
code
88095138/cell_16
[ "text_plain_output_1.png" ]
from pyradox_tabular.data import DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
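# Note: the scaler is fit on X_train.values only and then applied to the
# train, validation and test splits, so no validation/test statistics leak
# into the scaling; data_test is built without labels because the test
# targets are unknown.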
code
88095138/cell_24
[ "text_plain_output_1.png" ]
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd

test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded_inversed = le.inverse_transform(test_preds_decoded)
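# Note: decoding reverses the two-stage target encoding built earlier:
# oe.inverse_transform maps each row of softmax probabilities back to an
# integer class id (effectively an argmax over the one-hot columns), and
# le.inverse_transform maps those ids back to the original string labels.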
code
106198657/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
df2 = pd.DataFrame({'Pregnancies': [0], 'Glucose': [80], 'BloodPressure': [72], 'Skinthickness': [0], 'Insulin': [0], 'BMI': [23], 'DiabetesPedigreeFunction': [0.5], 'Age': [30], 'Outcome': [0]})
df2 = data.append(df2)
df2
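# Note: df2 is a single hand-crafted patient row appended to the original
# 768-row dataset; a later cell slices rows 768: to use exactly this
# appended row as the input for a prediction.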
code
106198657/cell_13
[ "image_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
code
106198657/cell_9
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
plt.plot(fpr, tpr, label='data 1')
plt.legend(loc=4)
plt.show()
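# Note: the second roc_curve call overwrites the first. Computing the curve
# from predicted probabilities (predict_proba) yields one point per
# threshold and hence a proper curve, whereas the hard 0/1 predictions used
# first give only a single operating point.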
code
106198657/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
code
106198657/cell_26
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
df2 = pd.DataFrame({'Pregnancies': [0], 'Glucose': [80], 'BloodPressure': [72], 'Skinthickness': [0], 'Insulin': [0], 'BMI': [23], 'DiabetesPedigreeFunction': [0.5], 'Age': [30], 'Outcome': [0]})
df2 = data.append(df2)
df2
X_test = df2[['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']][768:]
model2 = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model2.fit(X, y)
y_pred = model2.predict(X_test)
y_pred
code
106198657/cell_11
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
code
106198657/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
model = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model.fit(X, y)
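# Note: in scikit-learn, C is the inverse of the regularization strength,
# so C=10.0 applies weaker regularization than the default C=1.0; unlike
# the earlier train/test fit, this model is trained on the full X and y.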
code
106198657/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106198657/cell_7
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy:', metrics.accuracy_score(y_test, y_pred))
code
106198657/cell_18
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X))
cm = confusion_matrix(y, logreg.predict(X))
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
    for j in range(2):
        ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
print(classification_report(y, logreg.predict(X)))
code
106198657/cell_8
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label='data 1')
plt.legend(loc=4)
plt.show()
code
106198657/cell_15
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
code
106198657/cell_16
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X))
code
106198657/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
code
106198657/cell_17
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X))
cm = confusion_matrix(y, logreg.predict(X))
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
    for j in range(2):
        ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
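# Note: the rows of confusion_matrix(y, ...) are the actual classes, so the
# y-axis ticklabels above would more accurately read 'Actual 0s' and
# 'Actual 1s'; set_ylim(1.5, -0.5) keeps row 0 at the top of the plot.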
code
106198657/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
model2 = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model2.fit(X, y)
code
106198657/cell_14
[ "image_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
code
106198657/cell_10
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
code
106198657/cell_12
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
code
1008193/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
trn.head()
code
1008193/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1008193/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
print('Train set: ', trn.shape)
print('Test set: ', tst.shape)
code
17111364/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.index = newdata['name']
newdata.drop(['name', 'url', 'phone', 'listed_in(city)', 'listed_in(type)_x', 'address', 'dish_liked', 'listed_in(type)_y', 'menu_item', 'cuisines', 'reviews_list'], axis=1, inplace=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
np.unique(newdata['rating'], return_counts=True)
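# Note: ratings in this dataset arrive as strings of the form '4.1/5', so
# rate.str[:3] keeps just the numeric prefix; rows rated 'NEW' (not yet
# rated) and missing ratings are dropped before the final conversion to
# numbers.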
code
17111364/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.index = newdata['name']
newdata.drop(['name', 'url', 'phone', 'listed_in(city)', 'listed_in(type)_x', 'address', 'dish_liked', 'listed_in(type)_y', 'menu_item', 'cuisines', 'reviews_list'], axis=1, inplace=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata.drop('rate', axis=1, inplace=True)
newdata.describe(include='all')
code
18155947/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def one_hot_encoder(df, nan_as_category=True):
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return (df, new_columns)

def train_test():
    train = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    (print(train.shape), print(test.shape))
    ind_train = train['SK_ID_CURR']
    ind_test = test['SK_ID_CURR']
    new_df = train.append(test).reset_index()
    avg = [col for col in new_df.columns if 'AVG' in col]
    new_df['AVG_MEAN'] = new_df[avg].sum(axis=1)
    new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
    new_df['AVG_STD'] = new_df[avg].std(axis=1)
    new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
    mode = [col for col in new_df.columns if 'MODE' in col]
    new_df['MODE_MEAN'] = new_df[mode].sum(axis=1)
    new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
    new_df['MODE_STD'] = new_df[mode].std(axis=1)
    new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
    medi = [col for col in new_df.columns if 'MEDI' in col]
    new_df['MEDI_MEAN'] = new_df[medi].sum(axis=1)
    new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
    new_df['MEDI_STD'] = new_df[medi].std(axis=1)
    new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
    new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
    new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
    new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
    new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
    new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
    new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
    new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
    new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
    new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
    new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
    flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
    new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
    new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
    new_df, col = one_hot_encoder(new_df)
    train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
    test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
    (print(train.shape), print(test.shape))
    return (train, test)

def bureau_bb():
    bureau_balance = pd.read_csv('../input/bureau_balance.csv')
    bureau = pd.read_csv('../input/bureau.csv')
    bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
    bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'], 'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'], 'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'], 'STATUS_nan': ['sum', 'mean']}
    bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
    bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
    bureau_balance_agg = bureau_balance_agg.reset_index()
    bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
    bureau, cat_b = one_hot_encoder(bureau)
    b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'], 'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'], 'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'], 'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
    cat_b_agg = {}
    for col in cat_b:
        cat_b_agg[col] = ['mean']
    for col in cat_bb:
        cat_b_agg[col + '_SUM'] = ['mean']
        cat_b_agg[col + '_MEAN'] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
    bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
    bureau_agg = bureau_agg.reset_index()
    return bureau_agg

bureau = bureau_bb()
train = pd.merge(train, bureau, how='left', on='SK_ID_CURR')
test = pd.merge(test, bureau, how='left', on='SK_ID_CURR')

def previous_application():
    previous_application = pd.read_csv('../input/previous_application.csv')
    previous_application, cat_pr = one_hot_encoder(previous_application)
    previous_application['RATE_DOWN_PAYMENT'][previous_application['RATE_DOWN_PAYMENT'] < 0] = np.nan
    previous_application['AMT_DOWN_PAYMENT'][previous_application['AMT_DOWN_PAYMENT'] < 0] = np.nan
    previous_application['DAYS_TERMINATION'][previous_application['DAYS_TERMINATION'] == 365243] = np.nan
    previous_application['DAYS_LAST_DUE'][previous_application['DAYS_LAST_DUE'] == 365243] = np.nan
    previous_application['DAYS_FIRST_DUE'][previous_application['DAYS_FIRST_DUE'] == 365243] = np.nan
    previous_application['DAYS_FIRST_DRAWING'][previous_application['DAYS_FIRST_DRAWING'] == 365243] = np.nan
    pa_agg = {'AMT_ANNUITY': ['median', 'min', 'max'], 'AMT_APPLICATION': ['median', 'min', 'max'], 'AMT_CREDIT': ['median', 'min', 'max'], 'AMT_DOWN_PAYMENT': ['median', 'min', 'max'], 'AMT_GOODS_PRICE': ['median', 'min', 'max'], 'HOUR_APPR_PROCESS_START': ['mean', 'min', 'max'], 'NFLAG_LAST_APPL_IN_DAY': ['sum'], 'RATE_DOWN_PAYMENT': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIMARY': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIVILEGED': ['mean', 'min', 'max', 'sum'], 'DAYS_DECISION': ['median', 'min', 'max'], 'CNT_PAYMENT': ['median', 'min', 'max'], 'DAYS_FIRST_DRAWING': ['median', 'min', 'max'], 'DAYS_FIRST_DUE': ['median', 'min', 'max'], 'DAYS_LAST_DUE': ['median', 'min', 'max'], 'DAYS_TERMINATION': ['median', 'min', 'max'], 'NFLAG_INSURED_ON_APPROVAL': ['sum']}
    cat_agg = {}
    for cat in cat_pr:
        cat_agg[cat] = ['mean']
    previous_application_agg = previous_application.groupby('SK_ID_CURR').agg({**pa_agg, **cat_agg})
    previous_application_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in previous_application_agg.columns.tolist()])
    previous_application_agg = previous_application_agg.reset_index()
    return previous_application_agg

previous_application = previous_application()
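# Note: each helper follows the same pattern: one-hot encode the
# categorical columns, aggregate per id (SK_ID_BUREAU for the monthly
# balances, then SK_ID_CURR) with a dict of named statistics, flatten the
# resulting MultiIndex columns into NAME_STAT strings, and left-join the
# aggregates back onto the main train/test frames.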
code
18155947/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def one_hot_encoder(df, nan_as_category=True):
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return (df, new_columns)

def train_test():
    train = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    (print(train.shape), print(test.shape))
    ind_train = train['SK_ID_CURR']
    ind_test = test['SK_ID_CURR']
    new_df = train.append(test).reset_index()
    avg = [col for col in new_df.columns if 'AVG' in col]
    new_df['AVG_MEAN'] = new_df[avg].sum(axis=1)
    new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
    new_df['AVG_STD'] = new_df[avg].std(axis=1)
    new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
    mode = [col for col in new_df.columns if 'MODE' in col]
    new_df['MODE_MEAN'] = new_df[mode].sum(axis=1)
    new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
    new_df['MODE_STD'] = new_df[mode].std(axis=1)
    new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
    medi = [col for col in new_df.columns if 'MEDI' in col]
    new_df['MEDI_MEAN'] = new_df[medi].sum(axis=1)
    new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
    new_df['MEDI_STD'] = new_df[medi].std(axis=1)
    new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
    new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
    new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
    new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
    new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
    new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
    new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
    new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
    new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
    new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
    new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
    flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
    new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
    new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
    new_df, col = one_hot_encoder(new_df)
    train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
    test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
    (print(train.shape), print(test.shape))
    return (train, test)

train, test = train_test()
code
18155947/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def one_hot_encoder(df, nan_as_category=True):
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return (df, new_columns)

def train_test():
    train = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    (print(train.shape), print(test.shape))
    ind_train = train['SK_ID_CURR']
    ind_test = test['SK_ID_CURR']
    new_df = train.append(test).reset_index()
    avg = [col for col in new_df.columns if 'AVG' in col]
    new_df['AVG_MEAN'] = new_df[avg].sum(axis=1)
    new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
    new_df['AVG_STD'] = new_df[avg].std(axis=1)
    new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
    mode = [col for col in new_df.columns if 'MODE' in col]
    new_df['MODE_MEAN'] = new_df[mode].sum(axis=1)
    new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
    new_df['MODE_STD'] = new_df[mode].std(axis=1)
    new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
    medi = [col for col in new_df.columns if 'MEDI' in col]
    new_df['MEDI_MEAN'] = new_df[medi].sum(axis=1)
    new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
    new_df['MEDI_STD'] = new_df[medi].std(axis=1)
    new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
    new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
    new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
    new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
    new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
    new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
    new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
    new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
    new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
    new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
    new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
    flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
    new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
    new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
    new_df, col = one_hot_encoder(new_df)
    train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
    test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
    (print(train.shape), print(test.shape))
    return (train, test)

def bureau_bb():
    bureau_balance = pd.read_csv('../input/bureau_balance.csv')
    bureau = pd.read_csv('../input/bureau.csv')
    bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
    bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'], 'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'], 'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'], 'STATUS_nan': ['sum', 'mean']}
    bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
    bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
    bureau_balance_agg = bureau_balance_agg.reset_index()
    bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
    bureau, cat_b = one_hot_encoder(bureau)
    b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'], 'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'], 'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'], 'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
    cat_b_agg = {}
    for col in cat_b:
        cat_b_agg[col] = ['mean']
    for col in cat_bb:
        cat_b_agg[col + '_SUM'] = ['mean']
        cat_b_agg[col + '_MEAN'] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
    bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
    bureau_agg = bureau_agg.reset_index()
    return bureau_agg

bureau = bureau_bb()
code
18155947/cell_12
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def one_hot_encoder(df, nan_as_category=True):
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return (df, new_columns)

def train_test():
    train = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    print(train.shape)
    print(test.shape)
    ind_train = train['SK_ID_CURR']
    ind_test = test['SK_ID_CURR']
    new_df = train.append(test).reset_index()
    # row-wise statistics over the building AVG/MODE/MEDI column groups
    avg = [col for col in new_df.columns if 'AVG' in col]
    new_df['AVG_MEAN'] = new_df[avg].mean(axis=1)
    new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
    new_df['AVG_STD'] = new_df[avg].std(axis=1)
    new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
    mode = [col for col in new_df.columns if 'MODE' in col]
    new_df['MODE_MEAN'] = new_df[mode].mean(axis=1)
    new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
    new_df['MODE_STD'] = new_df[mode].std(axis=1)
    new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
    medi = [col for col in new_df.columns if 'MEDI' in col]
    new_df['MEDI_MEAN'] = new_df[medi].mean(axis=1)
    new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
    new_df['MEDI_STD'] = new_df[medi].std(axis=1)
    new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
    new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
    new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
    new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
    new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
    new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
    new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
    new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
    new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
    new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
    new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
    new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
    new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
    new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
    flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
    new_df['FLAG_DOC_MEAN'] = new_df[flag_doc_col].mean(axis=1)  # computed on the combined frame so test rows are filled too
    new_df['FLAG_DOC_SUM'] = new_df[flag_doc_col].sum(axis=1)
    new_df, col = one_hot_encoder(new_df)
    train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
    test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
    print(train.shape)
    print(test.shape)
    return (train, test)

def bureau_bb():
    bureau_balance = pd.read_csv('../input/bureau_balance.csv')
    bureau = pd.read_csv('../input/bureau.csv')
    bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
    bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'],
              'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'],
              'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'],
              'STATUS_nan': ['sum', 'mean']}
    bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
    bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
    bureau_balance_agg = bureau_balance_agg.reset_index()
    bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
    bureau, cat_b = one_hot_encoder(bureau)
    b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'],
             'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'],
             'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'],
             'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'],
             'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'],
             'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
    cat_b_agg = {}
    for col in cat_b:
        cat_b_agg[col] = ['mean']
    for col in cat_bb:
        cat_b_agg[col + '_SUM'] = ['mean']
        cat_b_agg[col + '_MEAN'] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
    bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
    bureau_agg = bureau_agg.reset_index()
    return bureau_agg

train, test = train_test()  # assumed restoration: train/test are referenced below but were never created in this cell
bureau = bureau_bb()
train = pd.merge(train, bureau, how='left', on='SK_ID_CURR')
test = pd.merge(test, bureau, how='left', on='SK_ID_CURR')

def previous_application():
    previous_application = pd.read_csv('../input/previous_application.csv')
    previous_application, cat_pr = one_hot_encoder(previous_application)
    # .loc instead of chained indexing, to avoid SettingWithCopyWarning
    previous_application.loc[previous_application['RATE_DOWN_PAYMENT'] < 0, 'RATE_DOWN_PAYMENT'] = np.nan
    previous_application.loc[previous_application['AMT_DOWN_PAYMENT'] < 0, 'AMT_DOWN_PAYMENT'] = np.nan
    previous_application.loc[previous_application['DAYS_TERMINATION'] == 365243, 'DAYS_TERMINATION'] = np.nan
    previous_application.loc[previous_application['DAYS_LAST_DUE'] == 365243, 'DAYS_LAST_DUE'] = np.nan
    previous_application.loc[previous_application['DAYS_FIRST_DUE'] == 365243, 'DAYS_FIRST_DUE'] = np.nan
    previous_application.loc[previous_application['DAYS_FIRST_DRAWING'] == 365243, 'DAYS_FIRST_DRAWING'] = np.nan
    pa_agg = {'AMT_ANNUITY': ['median', 'min', 'max'], 'AMT_APPLICATION': ['median', 'min', 'max'],
              'AMT_CREDIT': ['median', 'min', 'max'], 'AMT_DOWN_PAYMENT': ['median', 'min', 'max'],
              'AMT_GOODS_PRICE': ['median', 'min', 'max'], 'HOUR_APPR_PROCESS_START': ['mean', 'min', 'max'],
              'NFLAG_LAST_APPL_IN_DAY': ['sum'], 'RATE_DOWN_PAYMENT': ['mean', 'min', 'max', 'sum'],
              'RATE_INTEREST_PRIMARY': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIVILEGED': ['mean', 'min', 'max', 'sum'],
              'DAYS_DECISION': ['median', 'min', 'max'], 'CNT_PAYMENT': ['median', 'min', 'max'],
              'DAYS_FIRST_DRAWING': ['median', 'min', 'max'], 'DAYS_FIRST_DUE': ['median', 'min', 'max'],
              'DAYS_LAST_DUE': ['median', 'min', 'max'], 'DAYS_TERMINATION': ['median', 'min', 'max'],
              'NFLAG_INSURED_ON_APPROVAL': ['sum']}
    cat_agg = {}
    for cat in cat_pr:
        cat_agg[cat] = ['mean']
    previous_application_agg = previous_application.groupby('SK_ID_CURR').agg({**pa_agg, **cat_agg})
    previous_application_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in previous_application_agg.columns.tolist()])
    previous_application_agg = previous_application_agg.reset_index()
    return previous_application_agg

previous_application = previous_application()
train = pd.merge(train, previous_application, how='left', on='SK_ID_CURR')
test = pd.merge(test, previous_application, how='left', on='SK_ID_CURR')

def POS_CASH_balance():
    POS_CASH_balance = pd.read_csv('../input/POS_CASH_balance.csv')
    POS_CASH_balance, cat_pc_b = one_hot_encoder(POS_CASH_balance)
    pc_b_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'CNT_INSTALMENT': ['median', 'min', 'max'],
                'CNT_INSTALMENT_FUTURE': ['median', 'min', 'max'], 'SK_DPD': ['median', 'min', 'max'],
                'SK_DPD_DEF': ['median', 'min', 'max']}
    cat_agg = {}
    for cat in cat_pc_b:
        cat_agg[cat] = ['mean']
    POS_CASH_balance_agg = POS_CASH_balance.groupby(['SK_ID_CURR']).agg({**pc_b_agg, **cat_agg})
    POS_CASH_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in POS_CASH_balance_agg.columns.tolist()])
    POS_CASH_balance_agg = POS_CASH_balance_agg.reset_index()
    return POS_CASH_balance_agg

POS_CASH_balance = POS_CASH_balance()
code
128009699/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '/kaggle/input/twitter-suicidal-data/twitter-suicidal_data.txt'
d = {'tweet': [], 'intention': []}
column_names = []
file = open(file_path)
for f in file:
    content = f.split(',')
    if content[0] == 'tweet':
        column_names.append(content[0])
        column_names.append(content[1][:-1])
    elif content[1][:-1] != '':
        d['tweet'].append(content[0])
        d['intention'].append(int(content[1][:-1]))
db = pd.DataFrame(data=d, columns=column_names)
print('Number of records with suicidal intent: ', len([i for i in list(db['intention'].values) if i == 1]))
code
128009699/cell_4
[ "text_plain_output_1.png" ]
from datasets import load_dataset

dataset_hugging = load_dataset('dannyvas23/notas_suicidios')
code
128009699/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

file_path = '/kaggle/input/twitter-suicidal-data/twitter-suicidal_data.txt'
d = {'tweet': [], 'intention': []}
column_names = []
file = open(file_path)
for f in file:
    content = f.split(',')
    if content[0] == 'tweet':
        column_names.append(content[0])
        column_names.append(content[1][:-1])
    elif content[1][:-1] != '':
        d['tweet'].append(content[0])
        d['intention'].append(int(content[1][:-1]))
db = pd.DataFrame(data=d, columns=column_names)
print('Number of records without suicidal intent: ', len([i for i in list(db['intention'].values) if i == 0]))
code
2038426/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['GarageYrBlt'].fillna(trainNum['GarageYrBlt'].value_counts().idxmax(), inplace=True)
code
2038426/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
code
2038426/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatTransformed = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatTransformed], axis=1)
trainFinal.head()
code
2038426/cell_39
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist())))
code
2038426/cell_41
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist())))
np.array(list(zip(train.Id, testCat.columns[testCat.isnull().any()].tolist())))
code
2038426/cell_11
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
code
2038426/cell_19
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['LotFrontage'].fillna(trainNum['LotFrontage'].mean(), inplace=True)
trainNum['MasVnrArea'].fillna(trainNum['MasVnrArea'].mean(), inplace=True)
code
2038426/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
code
2038426/cell_28
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
code
2038426/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
code
2038426/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
code
2038426/cell_43
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist())))
testNum['BsmtFinSF1'].fillna(testNum['BsmtFinSF1'].mean(), inplace=True)
testNum['BsmtFinSF2'].fillna(testNum['BsmtFinSF2'].mean(), inplace=True)
testNum['BsmtUnfSF'].fillna(testNum['BsmtUnfSF'].mean(), inplace=True)
testNum['TotalBsmtSF'].fillna(testNum['TotalBsmtSF'].mean(), inplace=True)
testNum['BsmtFullBath'].fillna(testNum['BsmtFullBath'].mean(), inplace=True)
testNum['BsmtHalfBath'].fillna(testNum['BsmtHalfBath'].mean(), inplace=True)
testNum['GarageCars'].fillna(testNum['GarageCars'].mean(), inplace=True)
testNum['GarageArea'].fillna(testNum['GarageArea'].mean(), inplace=True)
testNum['LotFrontage'].fillna(testNum['LotFrontage'].mean(), inplace=True)
testNum['MasVnrArea'].fillna(testNum['MasVnrArea'].mean(), inplace=True)
testNum['GarageYrBlt'].fillna(testNum['GarageYrBlt'].value_counts().idxmax(), inplace=True)
code
2038426/cell_37
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
code
2038426/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
72086968/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df.head(5)
code
72086968/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
sub_df = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')
sub_df.head()
code
72086968/cell_20
[ "text_plain_output_1.png" ]
from sklearn import feature_extraction
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
train_vectors = count_vectorizer.fit_transform(train_df['text'])
test_vectors = count_vectorizer.transform(test_df['text'])
clf = RidgeClassifier()
scores = cross_val_score(clf, train_vectors, train_df['target'], cv=3, scoring='f1')
clf.fit(train_vectors, train_df['target'])
code
72086968/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df[train_df['target'] == 1].text.values[1]
code
72086968/cell_11
[ "text_html_output_1.png" ]
from sklearn import feature_extraction
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
print(example_train_vectors.todense().shape)
print(example_train_vectors.todense())
code
72086968/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df[train_df['target'] == 0].text.values[1]
code
72086968/cell_18
[ "text_plain_output_1.png" ]
from sklearn import feature_extraction
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd

train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
train_vectors = count_vectorizer.fit_transform(train_df['text'])
test_vectors = count_vectorizer.transform(test_df['text'])
clf = RidgeClassifier()
scores = cross_val_score(clf, train_vectors, train_df['target'], cv=3, scoring='f1')
scores
code
72086968/cell_16
[ "text_plain_output_1.png" ]
from sklearn.model_selection import cross_val_score

help(cross_val_score)
code
90155596/cell_5
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

path = '../input/fzxdata2/imgs/longan2berry/TestB/3.jpg'

def img_read(path):
    """Read a single image."""
    img = cv2.imread(path)
    img = img[..., ::-1]  # BGR -> RGB
    img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
    return img

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    output_graph_def = tf.GraphDef()
    with open('../input/longan2blueberry/train_models/epoch_29/model.pb', 'rb') as f:
        output_graph_def.ParseFromString(f.read())
        tf.import_graph_def(output_graph_def, name='')
    inputs = sess.graph.get_tensor_by_name('X_B:0')
    label = sess.graph.get_tensor_by_name('gen_B2A/generator:0')
    img = img_read(path).astype(np.float32) / 127.5 - 1
    y = sess.run(label, feed_dict={inputs: np.expand_dims(img, axis=0)})
    y = 0.5 * y[0] + 0.5
    cv2.imwrite('res0.jpg', y[..., ::-1] * 255)
    plt.axis('off')
    plt.imshow(y)
code
129026803/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()

# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()

# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')

# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)

# display plot
plt.show()

train_df.columns
code
129026803/cell_9
[ "application_vnd.jupyter.stderr_output_766.png", "application_vnd.jupyter.stderr_output_116.png", "application_vnd.jupyter.stderr_output_74.png", "application_vnd.jupyter.stderr_output_268.png", "application_vnd.jupyter.stderr_output_145.png", "application_vnd.jupyter.stderr_output_362.png", "application_vnd.jupyter.stderr_output_493.png", "application_vnd.jupyter.stderr_output_667.png", "application_vnd.jupyter.stderr_output_289.png", "application_vnd.jupyter.stderr_output_313.png", "application_vnd.jupyter.stderr_output_566.png", "application_vnd.jupyter.stderr_output_373.png", "application_vnd.jupyter.stderr_output_578.png", "application_vnd.jupyter.stderr_output_516.png", "application_vnd.jupyter.stderr_output_672.png", "application_vnd.jupyter.stderr_output_529.png", "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_732.png", "application_vnd.jupyter.stderr_output_222.png", "application_vnd.jupyter.stderr_output_626.png", "application_vnd.jupyter.stderr_output_96.png", "application_vnd.jupyter.stderr_output_115.png", "application_vnd.jupyter.stderr_output_642.png", "application_vnd.jupyter.stderr_output_207.png", "application_vnd.jupyter.stderr_output_640.png", "application_vnd.jupyter.stderr_output_341.png", "application_vnd.jupyter.stderr_output_723.png", "application_vnd.jupyter.stderr_output_296.png", "application_vnd.jupyter.stderr_output_110.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_637.png", "application_vnd.jupyter.stderr_output_745.png", "application_vnd.jupyter.stderr_output_112.png", "application_vnd.jupyter.stderr_output_400.png", "application_vnd.jupyter.stderr_output_212.png", "application_vnd.jupyter.stderr_output_499.png", "application_vnd.jupyter.stderr_output_768.png", "application_vnd.jupyter.stderr_output_700.png", "application_vnd.jupyter.stderr_output_634.png", "application_vnd.jupyter.stderr_output_740.png", "application_vnd.jupyter.stderr_output_420.png", "application_vnd.jupyter.stderr_output_77.png", "application_vnd.jupyter.stderr_output_24.png", "application_vnd.jupyter.stderr_output_354.png", "application_vnd.jupyter.stderr_output_417.png", "application_vnd.jupyter.stderr_output_16.png", "application_vnd.jupyter.stderr_output_274.png", "application_vnd.jupyter.stderr_output_610.png", "application_vnd.jupyter.stderr_output_461.png", "application_vnd.jupyter.stderr_output_205.png", "application_vnd.jupyter.stderr_output_632.png", "application_vnd.jupyter.stderr_output_203.png", "application_vnd.jupyter.stderr_output_368.png", "application_vnd.jupyter.stderr_output_575.png", "application_vnd.jupyter.stderr_output_185.png", "application_vnd.jupyter.stderr_output_760.png", "application_vnd.jupyter.stderr_output_474.png", "application_vnd.jupyter.stderr_output_227.png", "application_vnd.jupyter.stderr_output_258.png", "application_vnd.jupyter.stderr_output_758.png", "application_vnd.jupyter.stderr_output_668.png", "application_vnd.jupyter.stderr_output_622.png", "application_vnd.jupyter.stderr_output_287.png", "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_286.png", "application_vnd.jupyter.stderr_output_426.png", "application_vnd.jupyter.stderr_output_152.png", "application_vnd.jupyter.stderr_output_156.png", "application_vnd.jupyter.stderr_output_553.png", "application_vnd.jupyter.stderr_output_335.png", "application_vnd.jupyter.stderr_output_215.png", "application_vnd.jupyter.stderr_output_691.png", 
"application_vnd.jupyter.stderr_output_522.png", "application_vnd.jupyter.stderr_output_283.png", "application_vnd.jupyter.stderr_output_710.png", "application_vnd.jupyter.stderr_output_684.png", "application_vnd.jupyter.stderr_output_643.png", "application_vnd.jupyter.stderr_output_70.png", "application_vnd.jupyter.stderr_output_310.png", "application_vnd.jupyter.stderr_output_631.png", "application_vnd.jupyter.stderr_output_449.png", "application_vnd.jupyter.stderr_output_554.png", "application_vnd.jupyter.stderr_output_204.png", "application_vnd.jupyter.stderr_output_284.png", "application_vnd.jupyter.stderr_output_124.png", "application_vnd.jupyter.stderr_output_223.png", "application_vnd.jupyter.stderr_output_767.png", "application_vnd.jupyter.stderr_output_498.png", "application_vnd.jupyter.stderr_output_435.png", "application_vnd.jupyter.stderr_output_219.png", "application_vnd.jupyter.stderr_output_279.png", "application_vnd.jupyter.stderr_output_771.png", "application_vnd.jupyter.stderr_output_81.png", "application_vnd.jupyter.stderr_output_111.png", "application_vnd.jupyter.stderr_output_52.png", "application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_605.png", "application_vnd.jupyter.stderr_output_131.png", "application_vnd.jupyter.stderr_output_695.png", "application_vnd.jupyter.stderr_output_437.png", "application_vnd.jupyter.stderr_output_172.png", "application_vnd.jupyter.stderr_output_545.png", "application_vnd.jupyter.stderr_output_512.png", "application_vnd.jupyter.stderr_output_99.png", "application_vnd.jupyter.stderr_output_348.png", "application_vnd.jupyter.stderr_output_32.png", "application_vnd.jupyter.stderr_output_246.png", "application_vnd.jupyter.stderr_output_704.png", "application_vnd.jupyter.stderr_output_385.png", "application_vnd.jupyter.stderr_output_183.png", "application_vnd.jupyter.stderr_output_502.png", "application_vnd.jupyter.stderr_output_181.png", "application_vnd.jupyter.stderr_output_722.png", "application_vnd.jupyter.stderr_output_299.png", "application_vnd.jupyter.stderr_output_141.png", "application_vnd.jupyter.stderr_output_176.png", "application_vnd.jupyter.stderr_output_356.png", "application_vnd.jupyter.stderr_output_737.png", "application_vnd.jupyter.stderr_output_297.png", "application_vnd.jupyter.stderr_output_506.png", "application_vnd.jupyter.stderr_output_93.png", "application_vnd.jupyter.stderr_output_563.png", "application_vnd.jupyter.stderr_output_346.png", "application_vnd.jupyter.stderr_output_651.png", "application_vnd.jupyter.stderr_output_641.png", "application_vnd.jupyter.stderr_output_382.png", "application_vnd.jupyter.stderr_output_170.png", "application_vnd.jupyter.stderr_output_132.png", "application_vnd.jupyter.stderr_output_713.png", "application_vnd.jupyter.stderr_output_471.png", "application_vnd.jupyter.stderr_output_655.png", "application_vnd.jupyter.stderr_output_123.png", "application_vnd.jupyter.stderr_output_692.png", "application_vnd.jupyter.stderr_output_465.png", "application_vnd.jupyter.stderr_output_540.png", "application_vnd.jupyter.stderr_output_48.png", "application_vnd.jupyter.stderr_output_236.png", "application_vnd.jupyter.stderr_output_418.png", "application_vnd.jupyter.stderr_output_391.png", "application_vnd.jupyter.stderr_output_636.png", "application_vnd.jupyter.stderr_output_550.png", "application_vnd.jupyter.stderr_output_765.png", "application_vnd.jupyter.stderr_output_731.png", "application_vnd.jupyter.stderr_output_355.png", 
"application_vnd.jupyter.stderr_output_421.png", "application_vnd.jupyter.stderr_output_378.png", "application_vnd.jupyter.stderr_output_764.png", "application_vnd.jupyter.stderr_output_432.png", "application_vnd.jupyter.stderr_output_431.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_472.png", "application_vnd.jupyter.stderr_output_774.png", "application_vnd.jupyter.stderr_output_776.png", "application_vnd.jupyter.stderr_output_73.png", "application_vnd.jupyter.stderr_output_137.png", "application_vnd.jupyter.stderr_output_133.png", "application_vnd.jupyter.stderr_output_165.png", "application_vnd.jupyter.stderr_output_715.png", "application_vnd.jupyter.stderr_output_504.png", "application_vnd.jupyter.stderr_output_381.png", "application_vnd.jupyter.stderr_output_75.png", "application_vnd.jupyter.stderr_output_552.png", "application_vnd.jupyter.stderr_output_627.png", "application_vnd.jupyter.stderr_output_694.png", "application_vnd.jupyter.stderr_output_585.png", "application_vnd.jupyter.stderr_output_365.png", "application_vnd.jupyter.stderr_output_618.png", "application_vnd.jupyter.stderr_output_693.png", "application_vnd.jupyter.stderr_output_392.png", "application_vnd.jupyter.stderr_output_513.png", "application_vnd.jupyter.stderr_output_690.png", "application_vnd.jupyter.stderr_output_593.png", "application_vnd.jupyter.stderr_output_666.png", "application_vnd.jupyter.stderr_output_653.png", "application_vnd.jupyter.stderr_output_414.png", "application_vnd.jupyter.stderr_output_436.png", "application_vnd.jupyter.stderr_output_608.png", "application_vnd.jupyter.stderr_output_146.png", "application_vnd.jupyter.stderr_output_321.png", "application_vnd.jupyter.stderr_output_629.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_467.png", "application_vnd.jupyter.stderr_output_324.png", "application_vnd.jupyter.stderr_output_407.png", "application_vnd.jupyter.stderr_output_528.png", "application_vnd.jupyter.stderr_output_360.png", "application_vnd.jupyter.stderr_output_484.png", "application_vnd.jupyter.stderr_output_674.png", "application_vnd.jupyter.stderr_output_537.png", "application_vnd.jupyter.stderr_output_190.png", "application_vnd.jupyter.stderr_output_447.png", "application_vnd.jupyter.stderr_output_380.png", "application_vnd.jupyter.stderr_output_270.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_361.png", "application_vnd.jupyter.stderr_output_155.png", "application_vnd.jupyter.stderr_output_344.png", "application_vnd.jupyter.stderr_output_18.png", "application_vnd.jupyter.stderr_output_86.png", "application_vnd.jupyter.stderr_output_334.png", "application_vnd.jupyter.stderr_output_526.png", "application_vnd.jupyter.stderr_output_649.png", "application_vnd.jupyter.stderr_output_423.png", "application_vnd.jupyter.stderr_output_754.png", "application_vnd.jupyter.stderr_output_277.png", "application_vnd.jupyter.stderr_output_291.png", "application_vnd.jupyter.stderr_output_38.png", "application_vnd.jupyter.stderr_output_482.png", "application_vnd.jupyter.stderr_output_568.png", "application_vnd.jupyter.stderr_output_231.png", "application_vnd.jupyter.stderr_output_317.png", "application_vnd.jupyter.stderr_output_240.png", "application_vnd.jupyter.stderr_output_272.png", "application_vnd.jupyter.stderr_output_88.png", "application_vnd.jupyter.stderr_output_65.png", "application_vnd.jupyter.stderr_output_443.png", 
"application_vnd.jupyter.stderr_output_235.png", "application_vnd.jupyter.stderr_output_148.png", "application_vnd.jupyter.stderr_output_520.png", "application_vnd.jupyter.stderr_output_453.png", "application_vnd.jupyter.stderr_output_751.png", "application_vnd.jupyter.stderr_output_179.png", "application_vnd.jupyter.stderr_output_143.png", "application_vnd.jupyter.stderr_output_409.png", "application_vnd.jupyter.stderr_output_58.png", "application_vnd.jupyter.stderr_output_615.png", "application_vnd.jupyter.stderr_output_638.png", "application_vnd.jupyter.stderr_output_66.png", "application_vnd.jupyter.stderr_output_724.png", "application_vnd.jupyter.stderr_output_718.png", "application_vnd.jupyter.stderr_output_68.png", "application_vnd.jupyter.stderr_output_171.png", "application_vnd.jupyter.stderr_output_106.png", "application_vnd.jupyter.stderr_output_351.png", "application_vnd.jupyter.stderr_output_224.png", "application_vnd.jupyter.stderr_output_105.png", "application_vnd.jupyter.stderr_output_763.png", "application_vnd.jupyter.stderr_output_275.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_345.png", "application_vnd.jupyter.stderr_output_495.png", "application_vnd.jupyter.stderr_output_26.png", "application_vnd.jupyter.stderr_output_178.png", "application_vnd.jupyter.stderr_output_322.png", "application_vnd.jupyter.stderr_output_729.png", "application_vnd.jupyter.stderr_output_749.png", "application_vnd.jupyter.stderr_output_577.png", "application_vnd.jupyter.stderr_output_384.png", "application_vnd.jupyter.stderr_output_406.png", "application_vnd.jupyter.stderr_output_620.png", "application_vnd.jupyter.stderr_output_238.png", "application_vnd.jupyter.stderr_output_439.png", "application_vnd.jupyter.stderr_output_564.png", "application_vnd.jupyter.stderr_output_650.png", "application_vnd.jupyter.stderr_output_371.png", "application_vnd.jupyter.stderr_output_779.png", "application_vnd.jupyter.stderr_output_747.png", "application_vnd.jupyter.stderr_output_253.png", "application_vnd.jupyter.stderr_output_450.png", "application_vnd.jupyter.stderr_output_524.png", "application_vnd.jupyter.stderr_output_490.png", "application_vnd.jupyter.stderr_output_136.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_633.png", "application_vnd.jupyter.stderr_output_389.png", "application_vnd.jupyter.stderr_output_489.png", "application_vnd.jupyter.stderr_output_323.png", "application_vnd.jupyter.stderr_output_422.png", "application_vnd.jupyter.stderr_output_162.png", "application_vnd.jupyter.stderr_output_376.png", "application_vnd.jupyter.stderr_output_676.png", "application_vnd.jupyter.stderr_output_387.png", "application_vnd.jupyter.stderr_output_393.png", "application_vnd.jupyter.stderr_output_232.png", "application_vnd.jupyter.stderr_output_623.png", "application_vnd.jupyter.stderr_output_260.png", "application_vnd.jupyter.stderr_output_31.png", "application_vnd.jupyter.stderr_output_125.png", "application_vnd.jupyter.stderr_output_576.png", "application_vnd.jupyter.stderr_output_134.png", "application_vnd.jupyter.stderr_output_113.png", "application_vnd.jupyter.stderr_output_194.png", "application_vnd.jupyter.stderr_output_755.png", "application_vnd.jupyter.stderr_output_221.png", "application_vnd.jupyter.stderr_output_302.png", "application_vnd.jupyter.stderr_output_599.png", "application_vnd.jupyter.stderr_output_664.png", "application_vnd.jupyter.stderr_output_546.png", 
"application_vnd.jupyter.stderr_output_305.png", "application_vnd.jupyter.stderr_output_476.png", "application_vnd.jupyter.stderr_output_497.png", "application_vnd.jupyter.stderr_output_478.png", "application_vnd.jupyter.stderr_output_656.png", "application_vnd.jupyter.stderr_output_383.png", "application_vnd.jupyter.stderr_output_336.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_402.png", "application_vnd.jupyter.stderr_output_519.png", "application_vnd.jupyter.stderr_output_542.png", "application_vnd.jupyter.stderr_output_738.png", "application_vnd.jupyter.stderr_output_725.png", "application_vnd.jupyter.stderr_output_518.png", "application_vnd.jupyter.stderr_output_245.png", "application_vnd.jupyter.stderr_output_316.png", "application_vnd.jupyter.stderr_output_468.png", "application_vnd.jupyter.stderr_output_662.png", "application_vnd.jupyter.stderr_output_25.png", "application_vnd.jupyter.stderr_output_750.png", "application_vnd.jupyter.stderr_output_714.png", "application_vnd.jupyter.stderr_output_762.png", "application_vnd.jupyter.stderr_output_699.png", "application_vnd.jupyter.stderr_output_419.png", "application_vnd.jupyter.stderr_output_697.png", "application_vnd.jupyter.stderr_output_570.png", "application_vnd.jupyter.stderr_output_404.png", "application_vnd.jupyter.stderr_output_609.png", "application_vnd.jupyter.stderr_output_330.png", "application_vnd.jupyter.stderr_output_403.png", "application_vnd.jupyter.stderr_output_249.png", "application_vnd.jupyter.stderr_output_229.png", "application_vnd.jupyter.stderr_output_366.png", "application_vnd.jupyter.stderr_output_263.png", "application_vnd.jupyter.stderr_output_278.png", "application_vnd.jupyter.stderr_output_716.png", "application_vnd.jupyter.stderr_output_273.png", "application_vnd.jupyter.stderr_output_525.png", "application_vnd.jupyter.stderr_output_135.png", "application_vnd.jupyter.stderr_output_769.png", "application_vnd.jupyter.stderr_output_555.png", "application_vnd.jupyter.stderr_output_211.png", "application_vnd.jupyter.stderr_output_517.png", "application_vnd.jupyter.stderr_output_174.png", "application_vnd.jupyter.stderr_output_777.png", "application_vnd.jupyter.stderr_output_503.png", "application_vnd.jupyter.stderr_output_454.png", "application_vnd.jupyter.stderr_output_515.png", "application_vnd.jupyter.stderr_output_757.png", "application_vnd.jupyter.stderr_output_510.png", "application_vnd.jupyter.stderr_output_12.png", "application_vnd.jupyter.stderr_output_463.png", "application_vnd.jupyter.stderr_output_720.png", "application_vnd.jupyter.stderr_output_574.png", "application_vnd.jupyter.stderr_output_285.png", "application_vnd.jupyter.stderr_output_177.png", "application_vnd.jupyter.stderr_output_527.png", "application_vnd.jupyter.stderr_output_644.png", "application_vnd.jupyter.stderr_output_342.png", "application_vnd.jupyter.stderr_output_665.png", "application_vnd.jupyter.stderr_output_89.png", "application_vnd.jupyter.stderr_output_82.png", "application_vnd.jupyter.stderr_output_269.png", "application_vnd.jupyter.stderr_output_288.png", "application_vnd.jupyter.stderr_output_358.png", "application_vnd.jupyter.stderr_output_398.png", "application_vnd.jupyter.stderr_output_535.png", "application_vnd.jupyter.stderr_output_772.png", "application_vnd.jupyter.stderr_output_388.png", "application_vnd.jupyter.stderr_output_332.png", "application_vnd.jupyter.stderr_output_72.png", "application_vnd.jupyter.stderr_output_290.png", 
"application_vnd.jupyter.stderr_output_586.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_189.png", "application_vnd.jupyter.stderr_output_149.png", "application_vnd.jupyter.stderr_output_308.png", "application_vnd.jupyter.stderr_output_91.png", "application_vnd.jupyter.stderr_output_239.png", "application_vnd.jupyter.stderr_output_95.png", "application_vnd.jupyter.stderr_output_394.png", "application_vnd.jupyter.stderr_output_580.png", "application_vnd.jupyter.stderr_output_541.png", "application_vnd.jupyter.stderr_output_559.png", "application_vnd.jupyter.stderr_output_583.png", "application_vnd.jupyter.stderr_output_496.png", "application_vnd.jupyter.stderr_output_67.png", "application_vnd.jupyter.stderr_output_237.png", "application_vnd.jupyter.stderr_output_339.png", "application_vnd.jupyter.stderr_output_689.png", "application_vnd.jupyter.stderr_output_306.png", "application_vnd.jupyter.stderr_output_604.png", "application_vnd.jupyter.stderr_output_424.png", "application_vnd.jupyter.stderr_output_534.png", "application_vnd.jupyter.stderr_output_337.png", "application_vnd.jupyter.stderr_output_481.png", "application_vnd.jupyter.stderr_output_592.png", "application_vnd.jupyter.stderr_output_80.png", "application_vnd.jupyter.stderr_output_539.png", "application_vnd.jupyter.stderr_output_71.png", "application_vnd.jupyter.stderr_output_300.png", "application_vnd.jupyter.stderr_output_259.png", "application_vnd.jupyter.stderr_output_293.png", "application_vnd.jupyter.stderr_output_600.png", "application_vnd.jupyter.stderr_output_728.png", "application_vnd.jupyter.stderr_output_709.png", "application_vnd.jupyter.stderr_output_257.png", "application_vnd.jupyter.stderr_output_10.png", "application_vnd.jupyter.stderr_output_775.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_159.png", "application_vnd.jupyter.stderr_output_396.png", "application_vnd.jupyter.stderr_output_735.png", "application_vnd.jupyter.stderr_output_325.png", "application_vnd.jupyter.stderr_output_464.png", "application_vnd.jupyter.stderr_output_663.png", "application_vnd.jupyter.stderr_output_220.png", "application_vnd.jupyter.stderr_output_247.png", "application_vnd.jupyter.stderr_output_657.png", "application_vnd.jupyter.stderr_output_675.png", "application_vnd.jupyter.stderr_output_98.png", "application_vnd.jupyter.stderr_output_677.png", "application_vnd.jupyter.stderr_output_59.png", "application_vnd.jupyter.stderr_output_589.png", "application_vnd.jupyter.stderr_output_34.png", "application_vnd.jupyter.stderr_output_197.png", "application_vnd.jupyter.stderr_output_369.png", "application_vnd.jupyter.stderr_output_459.png", "application_vnd.jupyter.stderr_output_536.png", "application_vnd.jupyter.stderr_output_756.png", "application_vnd.jupyter.stderr_output_444.png", "application_vnd.jupyter.stderr_output_90.png", "application_vnd.jupyter.stderr_output_441.png", "application_vnd.jupyter.stderr_output_538.png", "application_vnd.jupyter.stderr_output_352.png", "application_vnd.jupyter.stderr_output_543.png", "application_vnd.jupyter.stderr_output_584.png", "application_vnd.jupyter.stderr_output_485.png", "application_vnd.jupyter.stderr_output_144.png", "application_vnd.jupyter.stderr_output_83.png", "application_vnd.jupyter.stderr_output_140.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_214.png", "application_vnd.jupyter.stderr_output_44.png", 
"application_vnd.jupyter.stderr_output_505.png", "application_vnd.jupyter.stderr_output_742.png", "application_vnd.jupyter.stderr_output_281.png", "application_vnd.jupyter.stderr_output_590.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_127.png", "application_vnd.jupyter.stderr_output_320.png", "application_vnd.jupyter.stderr_output_544.png", "application_vnd.jupyter.stderr_output_705.png", "application_vnd.jupyter.stderr_output_661.png", "application_vnd.jupyter.stderr_output_440.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_571.png", "application_vnd.jupyter.stderr_output_587.png", "application_vnd.jupyter.stderr_output_195.png", "application_vnd.jupyter.stderr_output_331.png", "application_vnd.jupyter.stderr_output_160.png", "application_vnd.jupyter.stderr_output_42.png", "application_vnd.jupyter.stderr_output_561.png", "application_vnd.jupyter.stderr_output_602.png", "application_vnd.jupyter.stderr_output_298.png", "application_vnd.jupyter.stderr_output_598.png", "application_vnd.jupyter.stderr_output_192.png", "application_vnd.jupyter.stderr_output_770.png", "application_vnd.jupyter.stderr_output_721.png", "application_vnd.jupyter.stderr_output_678.png", "application_vnd.jupyter.stderr_output_702.png", "application_vnd.jupyter.stderr_output_327.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_261.png", "application_vnd.jupyter.stderr_output_483.png", "application_vnd.jupyter.stderr_output_670.png", "application_vnd.jupyter.stderr_output_301.png", "application_vnd.jupyter.stderr_output_84.png", "application_vnd.jupyter.stderr_output_347.png", "application_vnd.jupyter.stderr_output_180.png", "application_vnd.jupyter.stderr_output_411.png", "application_vnd.jupyter.stderr_output_687.png", "application_vnd.jupyter.stderr_output_230.png", "application_vnd.jupyter.stderr_output_428.png", "application_vnd.jupyter.stderr_output_314.png", "application_vnd.jupyter.stderr_output_120.png", "application_vnd.jupyter.stderr_output_778.png", "application_vnd.jupyter.stderr_output_241.png", "application_vnd.jupyter.stderr_output_405.png", "application_vnd.jupyter.stderr_output_752.png", "application_vnd.jupyter.stderr_output_475.png", "application_vnd.jupyter.stderr_output_163.png", "application_vnd.jupyter.stderr_output_558.png", "application_vnd.jupyter.stderr_output_647.png", "application_vnd.jupyter.stderr_output_60.png", "application_vnd.jupyter.stderr_output_648.png", "application_vnd.jupyter.stderr_output_151.png", "application_vnd.jupyter.stderr_output_617.png", "application_vnd.jupyter.stderr_output_103.png", "application_vnd.jupyter.stderr_output_216.png", "application_vnd.jupyter.stderr_output_109.png", "application_vnd.jupyter.stderr_output_372.png", "application_vnd.jupyter.stderr_output_202.png", "application_vnd.jupyter.stderr_output_367.png", "application_vnd.jupyter.stderr_output_184.png", "application_vnd.jupyter.stderr_output_97.png", "application_vnd.jupyter.stderr_output_473.png", "application_vnd.jupyter.stderr_output_594.png", "application_vnd.jupyter.stderr_output_395.png", "application_vnd.jupyter.stderr_output_390.png", "application_vnd.jupyter.stderr_output_596.png", "application_vnd.jupyter.stderr_output_669.png", "application_vnd.jupyter.stderr_output_688.png", "application_vnd.jupyter.stderr_output_201.png", "application_vnd.jupyter.stderr_output_307.png", "application_vnd.jupyter.stderr_output_673.png", 
"application_vnd.jupyter.stderr_output_660.png", "application_vnd.jupyter.stderr_output_551.png", "application_vnd.jupyter.stderr_output_514.png", "application_vnd.jupyter.stderr_output_30.png", "application_vnd.jupyter.stderr_output_416.png", "application_vnd.jupyter.stderr_output_15.png", "application_vnd.jupyter.stderr_output_603.png", "application_vnd.jupyter.stderr_output_108.png", "application_vnd.jupyter.stderr_output_591.png", "application_vnd.jupyter.stderr_output_579.png", "application_vnd.jupyter.stderr_output_62.png", "application_vnd.jupyter.stderr_output_328.png", "application_vnd.jupyter.stderr_output_679.png", "application_vnd.jupyter.stderr_output_671.png", "application_vnd.jupyter.stderr_output_479.png", "application_vnd.jupyter.stderr_output_470.png", "application_vnd.jupyter.stderr_output_611.png", "application_vnd.jupyter.stderr_output_739.png", "application_vnd.jupyter.stderr_output_635.png", "application_vnd.jupyter.stderr_output_250.png", "application_vnd.jupyter.stderr_output_193.png", "application_vnd.jupyter.stderr_output_17.png", "application_vnd.jupyter.stderr_output_686.png", "application_vnd.jupyter.stderr_output_242.png", "application_vnd.jupyter.stderr_output_87.png", "application_vnd.jupyter.stderr_output_654.png", "application_vnd.jupyter.stderr_output_294.png", "application_vnd.jupyter.stderr_output_619.png", "application_vnd.jupyter.stderr_output_711.png", "application_vnd.jupyter.stderr_output_187.png", "application_vnd.jupyter.stderr_output_588.png", "application_vnd.jupyter.stderr_output_445.png", "application_vnd.jupyter.stderr_output_477.png", "application_vnd.jupyter.stderr_output_612.png", "application_vnd.jupyter.stderr_output_130.png", "application_vnd.jupyter.stderr_output_455.png", "application_vnd.jupyter.stderr_output_28.png", "application_vnd.jupyter.stderr_output_469.png", "application_vnd.jupyter.stderr_output_364.png", "application_vnd.jupyter.stderr_output_448.png", "application_vnd.jupyter.stderr_output_658.png", "application_vnd.jupyter.stderr_output_680.png", "application_vnd.jupyter.stderr_output_117.png", "application_vnd.jupyter.stderr_output_625.png", "application_vnd.jupyter.stderr_output_256.png", "application_vnd.jupyter.stderr_output_46.png", "application_vnd.jupyter.stderr_output_780.png", "application_vnd.jupyter.stderr_output_413.png", "application_vnd.jupyter.stderr_output_401.png", "application_vnd.jupyter.stderr_output_206.png", "application_vnd.jupyter.stderr_output_456.png", "application_vnd.jupyter.stderr_output_234.png", "application_vnd.jupyter.stderr_output_531.png", "application_vnd.jupyter.stderr_output_734.png", "application_vnd.jupyter.stderr_output_312.png", "application_vnd.jupyter.stderr_output_682.png", "application_vnd.jupyter.stderr_output_630.png", "application_vnd.jupyter.stderr_output_69.png", "application_vnd.jupyter.stderr_output_487.png", "application_vnd.jupyter.stderr_output_616.png", "application_vnd.jupyter.stderr_output_606.png", "application_vnd.jupyter.stderr_output_708.png", "application_vnd.jupyter.stderr_output_252.png", "application_vnd.jupyter.stderr_output_64.png", "application_vnd.jupyter.stderr_output_76.png", "application_vnd.jupyter.stderr_output_262.png", "application_vnd.jupyter.stderr_output_41.png", "application_vnd.jupyter.stderr_output_157.png", "application_vnd.jupyter.stderr_output_377.png", "application_vnd.jupyter.stderr_output_727.png", "application_vnd.jupyter.stderr_output_480.png", "application_vnd.jupyter.stderr_output_167.png", 
"application_vnd.jupyter.stderr_output_79.png", "application_vnd.jupyter.stderr_output_572.png", "application_vnd.jupyter.stderr_output_386.png", "application_vnd.jupyter.stderr_output_20.png", "application_vnd.jupyter.stderr_output_49.png", "application_vnd.jupyter.stderr_output_338.png", "application_vnd.jupyter.stderr_output_126.png", "application_vnd.jupyter.stderr_output_743.png", "application_vnd.jupyter.stderr_output_753.png", "application_vnd.jupyter.stderr_output_560.png", "application_vnd.jupyter.stderr_output_333.png", "application_vnd.jupyter.stderr_output_569.png", "application_vnd.jupyter.stderr_output_218.png", "application_vnd.jupyter.stderr_output_63.png", "application_vnd.jupyter.stderr_output_446.png", "application_vnd.jupyter.stderr_output_494.png", "application_vnd.jupyter.stderr_output_47.png", "application_vnd.jupyter.stderr_output_621.png", "application_vnd.jupyter.stderr_output_36.png", "application_vnd.jupyter.stderr_output_607.png", "application_vnd.jupyter.stderr_output_100.png", "application_vnd.jupyter.stderr_output_430.png", "application_vnd.jupyter.stderr_output_266.png", "application_vnd.jupyter.stderr_output_681.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_22.png", "application_vnd.jupyter.stderr_output_363.png", "application_vnd.jupyter.stderr_output_748.png", "application_vnd.jupyter.stderr_output_169.png", "application_vnd.jupyter.stderr_output_547.png", "application_vnd.jupyter.stderr_output_683.png", "application_vnd.jupyter.stderr_output_415.png", "application_vnd.jupyter.stderr_output_166.png", "application_vnd.jupyter.stderr_output_343.png", "application_vnd.jupyter.stderr_output_508.png", "application_vnd.jupyter.stderr_output_318.png", "application_vnd.jupyter.stderr_output_567.png", "application_vnd.jupyter.stderr_output_292.png", "application_vnd.jupyter.stderr_output_726.png", "application_vnd.jupyter.stderr_output_173.png", "application_vnd.jupyter.stderr_output_511.png", "application_vnd.jupyter.stderr_output_319.png", "application_vnd.jupyter.stderr_output_191.png", "application_vnd.jupyter.stderr_output_399.png", "application_vnd.jupyter.stderr_output_744.png", "application_vnd.jupyter.stderr_output_408.png", "application_vnd.jupyter.stderr_output_374.png", "application_vnd.jupyter.stderr_output_500.png", "application_vnd.jupyter.stderr_output_433.png", "application_vnd.jupyter.stderr_output_213.png", "application_vnd.jupyter.stderr_output_186.png", "application_vnd.jupyter.stderr_output_168.png", "application_vnd.jupyter.stderr_output_613.png", "application_vnd.jupyter.stderr_output_349.png", "application_vnd.jupyter.stderr_output_56.png", "application_vnd.jupyter.stderr_output_452.png", "application_vnd.jupyter.stderr_output_645.png", "application_vnd.jupyter.stderr_output_397.png", "application_vnd.jupyter.stderr_output_104.png", "application_vnd.jupyter.stderr_output_491.png", "application_vnd.jupyter.stderr_output_196.png", "application_vnd.jupyter.stderr_output_50.png", "application_vnd.jupyter.stderr_output_429.png", "application_vnd.jupyter.stderr_output_736.png", "application_vnd.jupyter.stderr_output_295.png", "application_vnd.jupyter.stderr_output_114.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_101.png", "application_vnd.jupyter.stderr_output_359.png", "application_vnd.jupyter.stderr_output_492.png", "application_vnd.jupyter.stderr_output_267.png", "application_vnd.jupyter.stderr_output_225.png", 
"application_vnd.jupyter.stderr_output_209.png", "application_vnd.jupyter.stderr_output_226.png", "application_vnd.jupyter.stderr_output_139.png", "application_vnd.jupyter.stderr_output_717.png", "application_vnd.jupyter.stderr_output_1.png", "application_vnd.jupyter.stderr_output_128.png", "application_vnd.jupyter.stderr_output_150.png", "application_vnd.jupyter.stderr_output_533.png", "application_vnd.jupyter.stderr_output_556.png", "application_vnd.jupyter.stderr_output_217.png", "application_vnd.jupyter.stderr_output_61.png", "application_vnd.jupyter.stderr_output_51.png", "application_vnd.jupyter.stderr_output_142.png", "application_vnd.jupyter.stderr_output_326.png", "application_vnd.jupyter.stderr_output_311.png", "application_vnd.jupyter.stderr_output_304.png", "application_vnd.jupyter.stderr_output_353.png", "application_vnd.jupyter.stderr_output_530.png", "application_vnd.jupyter.stderr_output_138.png", "application_vnd.jupyter.stderr_output_412.png", "application_vnd.jupyter.stderr_output_548.png", "application_vnd.jupyter.stderr_output_161.png", "application_vnd.jupyter.stderr_output_379.png", "application_vnd.jupyter.stderr_output_200.png", "application_vnd.jupyter.stderr_output_427.png", "application_vnd.jupyter.stderr_output_280.png", "application_vnd.jupyter.stderr_output_707.png", "application_vnd.jupyter.stderr_output_122.png", "application_vnd.jupyter.stderr_output_488.png", "application_vnd.jupyter.stderr_output_624.png", "application_vnd.jupyter.stderr_output_94.png", "application_vnd.jupyter.stderr_output_233.png", "application_vnd.jupyter.stderr_output_153.png", "application_vnd.jupyter.stderr_output_282.png", "application_vnd.jupyter.stderr_output_730.png", "application_vnd.jupyter.stderr_output_45.png", "application_vnd.jupyter.stderr_output_659.png", "application_vnd.jupyter.stderr_output_462.png", "application_vnd.jupyter.stderr_output_639.png", "application_vnd.jupyter.stderr_output_652.png", "application_vnd.jupyter.stderr_output_182.png", "application_vnd.jupyter.stderr_output_158.png", "application_vnd.jupyter.stderr_output_425.png", "application_vnd.jupyter.stderr_output_78.png", "application_vnd.jupyter.stderr_output_509.png", "application_vnd.jupyter.stderr_output_698.png", "application_vnd.jupyter.stderr_output_370.png", "application_vnd.jupyter.stderr_output_175.png", "application_vnd.jupyter.stderr_output_276.png", "application_vnd.jupyter.stderr_output_188.png", "application_vnd.jupyter.stderr_output_601.png", "application_vnd.jupyter.stderr_output_696.png", "application_vnd.jupyter.stderr_output_457.png", "application_vnd.jupyter.stderr_output_14.png", "application_vnd.jupyter.stderr_output_562.png", "application_vnd.jupyter.stderr_output_773.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_119.png", "application_vnd.jupyter.stderr_output_581.png", "application_vnd.jupyter.stderr_output_741.png", "application_vnd.jupyter.stderr_output_309.png", "application_vnd.jupyter.stderr_output_107.png", "application_vnd.jupyter.stderr_output_255.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_523.png", "application_vnd.jupyter.stderr_output_733.png", "application_vnd.jupyter.stderr_output_719.png", "application_vnd.jupyter.stderr_output_43.png", "application_vnd.jupyter.stderr_output_357.png", "application_vnd.jupyter.stderr_output_595.png", "application_vnd.jupyter.stderr_output_706.png", "application_vnd.jupyter.stderr_output_265.png", 
"application_vnd.jupyter.stderr_output_350.png", "application_vnd.jupyter.stderr_output_685.png", "application_vnd.jupyter.stderr_output_54.png", "application_vnd.jupyter.stderr_output_85.png", "application_vnd.jupyter.stderr_output_521.png", "application_vnd.jupyter.stderr_output_118.png", "application_vnd.jupyter.stderr_output_154.png", "application_vnd.jupyter.stderr_output_438.png", "application_vnd.jupyter.stderr_output_442.png", "application_vnd.jupyter.stderr_output_198.png", "application_vnd.jupyter.stderr_output_712.png", "application_vnd.jupyter.stderr_output_597.png", "application_vnd.jupyter.stderr_output_761.png", "application_vnd.jupyter.stderr_output_746.png", "application_vnd.jupyter.stderr_output_460.png", "application_vnd.jupyter.stderr_output_549.png", "application_vnd.jupyter.stderr_output_271.png", "application_vnd.jupyter.stderr_output_55.png", "application_vnd.jupyter.stderr_output_501.png", "application_vnd.jupyter.stderr_output_228.png", "application_vnd.jupyter.stderr_output_303.png", "application_vnd.jupyter.stderr_output_614.png", "application_vnd.jupyter.stderr_output_254.png", "application_vnd.jupyter.stderr_output_147.png", "application_vnd.jupyter.stderr_output_375.png", "application_vnd.jupyter.stderr_output_121.png", "application_vnd.jupyter.stderr_output_582.png", "application_vnd.jupyter.stderr_output_451.png", "application_vnd.jupyter.stderr_output_628.png", "application_vnd.jupyter.stderr_output_466.png", "application_vnd.jupyter.stderr_output_703.png", "application_vnd.jupyter.stderr_output_340.png", "application_vnd.jupyter.stderr_output_329.png", "application_vnd.jupyter.stderr_output_208.png", "application_vnd.jupyter.stderr_output_243.png", "application_vnd.jupyter.stderr_output_759.png", "application_vnd.jupyter.stderr_output_199.png", "application_vnd.jupyter.stderr_output_565.png", "application_vnd.jupyter.stderr_output_248.png", "application_vnd.jupyter.stderr_output_573.png", "application_vnd.jupyter.stderr_output_210.png", "application_vnd.jupyter.stderr_output_92.png", "application_vnd.jupyter.stderr_output_164.png", "application_vnd.jupyter.stderr_output_129.png", "application_vnd.jupyter.stderr_output_102.png", "application_vnd.jupyter.stderr_output_507.png", "application_vnd.jupyter.stderr_output_251.png", "application_vnd.jupyter.stderr_output_557.png", "application_vnd.jupyter.stderr_output_410.png", "application_vnd.jupyter.stderr_output_40.png", "application_vnd.jupyter.stderr_output_315.png", "application_vnd.jupyter.stderr_output_532.png", "application_vnd.jupyter.stderr_output_37.png", "application_vnd.jupyter.stderr_output_244.png", "application_vnd.jupyter.stderr_output_701.png", "application_vnd.jupyter.stderr_output_264.png", "application_vnd.jupyter.stderr_output_486.png", "application_vnd.jupyter.stderr_output_646.png", "application_vnd.jupyter.stderr_output_434.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
plt.figure(figsize=(12, 12))
sns.heatmap(train_df.corr(), cmap='coolwarm', annot=True, fmt='.2f')
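# Editor's sketch (not in the original notebook): these competition CSVs are all
# numeric, so corr() works as written; if object columns were ever added, a safe
# variant on pandas >= 1.5 would be:
# sns.heatmap(train_df.corr(numeric_only=True), cmap='coolwarm', annot=True, fmt='.2f')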
code
129026803/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
code
129026803/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df.head()
code
129026803/cell_2
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
code
129026803/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()

# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()

# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')

# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)

# display plot
plt.show()
train_df.columns
train_df.describe().T
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
predictions_on_test_df = model.predict(test_df)
predictions_on_test_df
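# Editor's sketch (assumption: same train_df as above, not in the original
# notebook): a 5-fold cross-validated MAE is a steadier check than the single
# 80/20 split used in this cell.
from sklearn.model_selection import cross_val_score
cv_mae = -cross_val_score(LinearRegression(),
                          train_df.drop(columns=['yield']), train_df['yield'],
                          cv=5, scoring='neg_mean_absolute_error').mean()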
code
129026803/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129026803/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
train_df.hist(figsize=(14, 14), xrot=45)
plt.show()
code
129026803/cell_17
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()

# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()

# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')

# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)

# display plot
plt.show()
train_df.columns
train_df.describe().T
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print('MAE:', mae)
code
129026803/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()

# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()

# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')

# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)

# display plot
plt.show()
train_df.columns
train_df.describe().T
code
129026803/cell_22
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split, GridSearchCV
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()

# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()

# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')

# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)

# display plot
plt.show()
train_df.columns
train_df.describe().T
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
rf_model = RandomForestRegressor()
gb_model = GradientBoostingRegressor()
rf_param_grid = {'n_estimators': [5, 20, 50, 100],
                 'max_features': ['auto', 'sqrt'],
                 'max_depth': [int(x) for x in np.linspace(10, 120, num=12)],
                 'min_samples_split': [2, 6, 10],
                 'min_samples_leaf': [1, 3, 4],
                 'bootstrap': [True, False]}
gb_param_grid = {'learning_rate': [0.01, 0.1],
                 'n_estimators': [100, 500, 1000],
                 'max_depth': [3, 5, 7],
                 'min_samples_split': [2, 5, 10],
                 'min_samples_leaf': [1, 2, 4],
                 'subsample': [0.5, 0.8, 1.0],
                 'max_features': ['sqrt', 'log2', None]}
rf_gs = GridSearchCV(rf_model, rf_param_grid, cv=5, n_jobs=-1)
gb_gs = GridSearchCV(gb_model, gb_param_grid, cv=5, n_jobs=-1)
rf_gs.fit(X_train, y_train)
gb_gs.fit(X_train, y_train)
rf_pred = rf_gs.predict(X_test)
gb_pred = gb_gs.predict(X_test)
rf_mae = mean_absolute_error(y_test, rf_pred)
gb_mae = mean_absolute_error(y_test, gb_pred)
print('Random Forest MAE: ', rf_mae)
print('Gradient Boosting MAE: ', gb_mae)
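# Editor's note: 'auto' was deprecated and later removed from max_features in
# scikit-learn (gone as of 1.3); on current versions replace it with 1.0 or 'sqrt'.
# A hedged, cheaper alternative to the exhaustive grid above (not in the original
# notebook) is a randomized search over the same random-forest grid:
from sklearn.model_selection import RandomizedSearchCV
rf_rs = RandomizedSearchCV(RandomForestRegressor(), rf_param_grid,
                           n_iter=20, cv=5, n_jobs=-1, random_state=42)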
code
129026803/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
for ax, feature in zip(axes, features):
    ax.scatter(train_df[feature], train_df['yield'])
    ax.set_xlabel(feature)
    ax.set_ylabel('yield')
plt.show()
code
104117646/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
code
104117646/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d
e = np.arange(10, 20, 2)
e
code
104117646/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d
e = np.arange(10, 20, 2)
e
f = np.linspace(1, 10, 10)
f
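# Editor's note (not in the original notebook): arange steps by a fixed
# increment and excludes the stop value, while linspace places a fixed count of
# evenly spaced points and includes both endpoints by default:
# np.arange(1, 10, 2)     -> array([1, 3, 5, 7, 9])
# np.linspace(1, 10, 10)  -> array([ 1.,  2., ...,  10.])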
code
104117646/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
code
104117646/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d
code
104117646/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
a.ndim
a.shape
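# Editor's note (not in the original notebook): ndim counts the axes and shape
# gives their lengths, so for the 1-D array above a.ndim == 1 and a.shape == (4,).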
code
104117646/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
code
104117646/cell_14
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
a.ndim
code
104117646/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
code
17115081/cell_21
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.info()
code
17115081/cell_34
[ "text_html_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
test_df.describe(include='all')
code
17115081/cell_23
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
code
17115081/cell_30
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
print('Train Data:')
print(train_df.isnull().sum())
print('#' * 30)
print('\nTest Data:')
print(test_df.isnull().sum())
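# Editor's note: filling the test set with medians computed on the training set,
# as above, keeps test information out of the preprocessing. A sketch of the same
# idea for any numeric column (hypothetical helper, not in the original notebook):
def fill_from_train(col):
    # compute the statistic on train only, then apply it to both frames
    median = train[col].median()
    train_df[col] = train_df[col].fillna(median)
    test_df[col] = test_df[col].fillna(median)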
code
17115081/cell_33
[ "text_html_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
train_df.describe(include='all')
code
17115081/cell_20
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info()
code
17115081/cell_29
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
print(train_df.shape)
print(test_df.shape)
code
17115081/cell_26
[ "text_html_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train.describe(include='all')
code
17115081/cell_18
[ "text_html_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head(10)
code
17115081/cell_24
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum()
code
17115081/cell_27
[ "text_html_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum()
test.describe(include='all')
code
18124779/cell_9
[ "image_output_1.png" ]
from pandas import DataFrame
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
performance = {'id': [1, 2, 3, 4],
               'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'],
               'time': [45, 50, 90, 50],
               'km': [6.0, 5.5, 6.0, 4.0],
               'rider_performance': [3, 4, 4, 4],
               'horse_performance': [4, 4, 5, 5],
               'avg_performance': [3.5, 4.0, 4.5, 4.5]}
# the column name must match the dict key 'id' (the original 'Id' yields an all-NaN column)
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x='date', y='time', rot=0)
time_graph.set_xlabel('Date')
time_graph.set_ylabel('Time')
km_graph = df.plot.bar(x='date', y='km', rot=0)
km_graph.set_xlabel('Date')
km_graph.set_ylabel('Km')
rider_performance_graph = df.plot.bar(x='date', y='rider_performance', rot=0)
rider_performance_graph.set_xlabel('Date')
rider_performance_graph.set_ylabel('Rider performance')
horse_performance_graph = df.plot.bar(x='date', y='horse_performance', rot=0)
horse_performance_graph.set_xlabel('Date')
horse_performance_graph.set_ylabel('Horse performance')
avg_performance_graph = df.plot.bar(x='date', y='avg_performance', rot=0)
avg_performance_graph.set_xlabel('Date')
avg_performance_graph.set_ylabel('Average performance')
performance_df = pd.DataFrame({'Rider performance': df['rider_performance'], 'Horse performance': df['horse_performance']})
performance_graph_comparison = performance_df.plot.bar(rot=0)
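# Editor's sketch (assumption: same frames as above, not in the original
# notebook): indexing the comparison frame by date keeps readable x-axis labels
# on the grouped bar chart.
performance_df.index = df['date']
comparison = performance_df.plot.bar(rot=0)
comparison.set_xlabel('Date')
comparison.set_ylabel('Performance rating')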
code
18124779/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import DataFrame
import pandas as pd
performance = {'id': [1, 2, 3, 4],
               'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'],
               'time': [45, 50, 90, 50],
               'km': [6.0, 5.5, 6.0, 4.0],
               'rider_performance': [3, 4, 4, 4],
               'horse_performance': [4, 4, 5, 5],
               'avg_performance': [3.5, 4.0, 4.5, 4.5]}
# the column name must match the dict key 'id' (the original 'Id' yields an all-NaN column)
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x='date', y='time', rot=0)
time_graph.set_xlabel('Date')
time_graph.set_ylabel('Time')
code