path: stringlengths 13–17
screenshot_names: sequencelengths 1–873
code: stringlengths 0–40.4k
cell_type: stringclasses (1 value)
105178234/cell_32
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df.head()
code
105178234/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
df.head()
code
105178234/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
code
105178234/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
code
105178234/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.head()
code
105178234/cell_35
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
# Assumes earlier cells (not extracted) label-encoded 'target' to 0/1 and added
# the 'num_char', 'NUm_words' and 'Num_sentence' length features
df[df['target'] == 1][['num_char', 'NUm_words', 'Num_sentence']].describe()
code
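The cell above describes 'num_char', 'NUm_words' and 'Num_sentence' and filters on target == 1, but none of the extracted cells create those columns or encode the target. A minimal sketch of the missing preparation, assuming LabelEncoder and nltk tokenizers were used (the column names, including the odd 'NUm_words' capitalization, are copied from the cell above):

import nltk
import pandas as pd
from sklearn.preprocessing import LabelEncoder

nltk.download('punkt')

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df = df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
df = df.rename(columns={'v1': 'target', 'v2': 'text'}).drop_duplicates(keep='first')

# encode ham/spam to 0/1 so that df['target'] == 1 selects the spam rows
df['target'] = LabelEncoder().fit_transform(df['target'])

# simple length features; names chosen to match the later cells
df['num_char'] = df['text'].apply(len)
df['NUm_words'] = df['text'].apply(lambda t: len(nltk.word_tokenize(t)))
df['Num_sentence'] = df['text'].apply(lambda t: len(nltk.sent_tokenize(t)))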
105178234/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
plt.pie(df['target'].value_counts(), labels=['ham', 'spam'], autopct='%0.3f')
code
105178234/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.head()
code
105178234/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.head(3)
code
105178234/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
df.shape
df.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], inplace=True)
df.rename(columns={'v1': 'target', 'v2': 'text'}, inplace=True)
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep='first')
df.duplicated().sum()
df.shape
plt.figure(figsize=(12, 6))
# 'NUm_words' and the encoded 0/1 'target' come from earlier cells not extracted here
sns.histplot(df[df['target'] == 0]['NUm_words'], color='green')
sns.histplot(df[df['target'] == 1]['NUm_words'], color='red')
code
50227013/cell_9
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Dropout

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)

dataset_train = pd.read_csv('../input/gooogle-stock-price/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 1258):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape[1]

with tpu_strategy.scope():
    regressor = Sequential()
    regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50))
    regressor.add(Dropout(0.2))
    regressor.add(Dense(units=1))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, y_train, epochs=500, batch_size=32)
regressor.summary()
regressor.save('regressorEpochs500_batchSize32.h5')

dataset_test = pd.read_csv('../input/gooogle-stock-price/Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, len(inputs)):
    X_test.append(inputs[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)

plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Date')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
code
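The windowing loop above (range(60, 1258)) builds the LSTM training set by hand. A tiny self-contained sketch on a toy series, showing the shape contract the loop produces:

import numpy as np

series = np.arange(10, dtype=float)  # toy stand-in for the scaled 'Open' column
lookback = 3                         # the notebook uses 60

X, y = [], []
for i in range(lookback, len(series)):
    X.append(series[i - lookback:i])  # the previous `lookback` values...
    y.append(series[i])               # ...predict the next value
X, y = np.array(X), np.array(y)
X = X.reshape((X.shape[0], X.shape[1], 1))  # (samples, timesteps, features) for the LSTM
print(X.shape, y.shape)  # (7, 3, 1) (7,)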
50227013/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

dataset_train = pd.read_csv('../input/gooogle-stock-price/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 1258):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape[1]
code
50227013/cell_6
[ "text_plain_output_1.png" ]
print('done')
code
50227013/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Dropout

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Running on TPU ', tpu.master())
except ValueError:
    tpu = None
# Note: the resolver is re-created unconditionally below, so this cell still
# fails if no TPU is attached despite the try/except
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
code
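As written, cell_2 raises on a machine without a TPU, because the resolver is re-created outside the try block. A defensive variant using only stock TensorFlow 2.x distribution APIs, falling back to the default CPU/GPU strategy:

import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
    print('Running on TPU', tpu.master())
except ValueError:
    strategy = tf.distribute.get_strategy()  # default strategy on CPU/GPU
print('Replicas:', strategy.num_replicas_in_sync)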
50227013/cell_1
[ "text_plain_output_1.png" ]
# !pip install tensorflow==2.2-rc1
!pip install tensorflow
code
50227013/cell_7
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Dropout

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)

dataset_train = pd.read_csv('../input/gooogle-stock-price/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 1258):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape[1]

with tpu_strategy.scope():
    regressor = Sequential()
    regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50))
    regressor.add(Dropout(0.2))
    regressor.add(Dense(units=1))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, y_train, epochs=500, batch_size=32)
regressor.summary()
code
50227013/cell_5
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Dropout

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)

dataset_train = pd.read_csv('../input/gooogle-stock-price/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 1258):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape[1]

with tpu_strategy.scope():
    regressor = Sequential()
    regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50, return_sequences=True))
    regressor.add(Dropout(0.2))
    regressor.add(LSTM(units=50))
    regressor.add(Dropout(0.2))
    regressor.add(Dense(units=1))
    regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, y_train, epochs=500, batch_size=32)
code
33096624/cell_13
[ "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
code
33096624/cell_25
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
learn.unfreeze()
learn.lr_find()
lr1 = learn.recorder.min_grad_lr
lr1
learn.fit_one_cycle(5, max_lr=slice(lr1 / 100, lr1 / 10, lr1))
code
33096624/cell_4
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
code
33096624/cell_23
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
33096624/cell_20
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
code
33096624/cell_1
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33096624/cell_7
[ "text_html_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
data.show_batch(3, figsize=(7, 6))
code
33096624/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
code
33096624/cell_8
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
data.show_batch(4, figsize=(7, 6))
code
33096624/cell_15
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
code
33096624/cell_16
[ "text_plain_output_1.png" ]
import torch  # assumed import from an earlier, unextracted cell

torch.cuda.is_available()
code
33096624/cell_17
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import torch  # assumed import from an earlier, unextracted cell

torch.cuda.is_available()
torch.backends.cudnn.enabled
code
33096624/cell_24
[ "text_html_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
learn.unfreeze()
learn.lr_find()
lr1 = learn.recorder.min_grad_lr
lr1
code
33096624/cell_14
[ "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
33096624/cell_22
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
learn.unfreeze()
learn.lr_find()
code
33096624/cell_10
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
path = Path('/kaggle/input/pretrained-pytorch-models/resnet50-19c8e357.pth')
path.cwd()
code
33096624/cell_27
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; not included in the extracted cell

path = Path('../input/normal-vs-camouflage-clothes/8k_normal_vs_camouflage_clothes_images')
path.ls()
bs = 64
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=get_transforms(), size=bs).normalize(imagenet_stats)
arch = resnet50
learn = cnn_learner(data, arch, metrics=[error_rate, accuracy], model_dir='/kaggle/working').to_fp16()
learn.model_dir = '/kaggle/working'
learn.lr_find()
lr = learn.recorder.min_grad_lr
lr
learn.fit_one_cycle(5, lr)
learn.save('stage-1-50')
learn.load('stage-1-50')
learn.unfreeze()
learn.lr_find()
lr1 = learn.recorder.min_grad_lr
lr1
learn.fit_one_cycle(5, max_lr=slice(lr1 / 100, lr1 / 10, lr1))
learn.save('model-2')
learn.load('model-2')
code
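With training finished in cell_27, a natural follow-up in fastai v1 is error inspection. A short sketch, assuming the v1 ClassificationInterpretation API and the learn object from the cells above:

# inspect where the classifier is wrong (fastai v1 API, assumed available via the star import)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(6, 6))
interp.plot_top_losses(9, figsize=(10, 10))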
2030951/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/diabetes.csv')
data.head()
code
2030951/cell_20
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
correlations['Outcome'].sort_values(ascending=False)

def visualise(data):
    fig, ax = plt.subplots()
    ax.scatter(data.iloc[:, 1].values, data.iloc[:, 5].values)
    ax.set_title('Highly Correlated Features')
    ax.set_xlabel('Plasma glucose concentration')
    ax.set_ylabel('Body mass index')

visualise(data)
data[['Glucose', 'BMI']] = data[['Glucose', 'BMI']].replace(0, np.nan)
data.dropna(inplace=True)
X = data[['Glucose', 'BMI']].values
y = data[['Outcome']].values
sc = StandardScaler()
X = sc.fit_transform(X)
print(X[0:10, :])
code
2030951/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
correlations['Outcome'].sort_values(ascending=False)
code
2030951/cell_26
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

# X_train/X_test/y_train/y_test come from a train/test split cell that is not shown here
model = LogisticRegression()
model.fit(X_train, y_train.ravel())
y_pred = model.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
code
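cell_26 consumes X_train/X_test/y_train/y_test, but the split cell was not extracted. A minimal sketch of the missing step, assuming the standard sklearn split (test_size and random_state are guesses):

from sklearn.model_selection import train_test_split

# X, y as built in cell_20: scaled Glucose/BMI features and Outcome labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)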
2030951/cell_18
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
correlations['Outcome'].sort_values(ascending=False)

def visualise(data):
    fig, ax = plt.subplots()
    ax.scatter(data.iloc[:, 1].values, data.iloc[:, 5].values)
    ax.set_title('Highly Correlated Features')
    ax.set_xlabel('Plasma glucose concentration')
    ax.set_ylabel('Body mass index')

visualise(data)
data[['Glucose', 'BMI']] = data[['Glucose', 'BMI']].replace(0, np.nan)
data.dropna(inplace=True)
X = data[['Glucose', 'BMI']].values
y = data[['Outcome']].values
sc = StandardScaler()
X = sc.fit_transform(X)
mean = np.mean(X, axis=0)
print('Mean: (%d, %d)' % (mean[0], mean[1]))
standard_deviation = np.std(X, axis=0)
print('Standard deviation: (%d, %d)' % (standard_deviation[0], standard_deviation[1]))
code
2030951/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

# X_train/X_test/y_train/y_test come from a train/test split cell that is not shown here
model = LogisticRegression()
model.fit(X_train, y_train.ravel())
y_pred = model.predict(X_test)
cm = confusion_matrix(y_test, y_pred)

def precision_recall(y_test, y_pred):
    # sklearn's confusion_matrix puts true labels on rows and predictions on
    # columns, so for the positive class (1): tp = cm[1, 1], fp = cm[0, 1],
    # fn = cm[1, 0]. (The original indexed tp as cm[0, 0], which is the
    # true-negative count.)
    cm = confusion_matrix(y_test, y_pred)
    tp = cm[1, 1]
    fp = cm[0, 1]
    fn = cm[1, 0]
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    return (prec, rec)

precision, recall = precision_recall(y_test, y_pred)
print('Precision: %f Recall %f' % (precision, recall))
code
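A quick cross-check of the hand-rolled precision_recall above against sklearn's own metrics, reusing y_test and y_pred from the same cell:

from sklearn.metrics import precision_score, recall_score

print('Precision: %f Recall %f' % (precision_score(y_test, y_pred), recall_score(y_test, y_pred)))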
2030951/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
correlations['Outcome'].sort_values(ascending=False)

def visualise(data):
    fig, ax = plt.subplots()
    ax.scatter(data.iloc[:, 1].values, data.iloc[:, 5].values)
    ax.set_title('Highly Correlated Features')
    ax.set_xlabel('Plasma glucose concentration')
    ax.set_ylabel('Body mass index')

visualise(data)
code
2030951/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
correlations['Outcome'].sort_values(ascending=False)

def visualise(data):
    fig, ax = plt.subplots()
    ax.scatter(data.iloc[:, 1].values, data.iloc[:, 5].values)
    ax.set_title('Highly Correlated Features')
    ax.set_xlabel('Plasma glucose concentration')
    ax.set_ylabel('Body mass index')

visualise(data)
data[['Glucose', 'BMI']] = data[['Glucose', 'BMI']].replace(0, np.nan)
data.dropna(inplace=True)
visualise(data)
code
2017076/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_store_info.info()
air_store_info.head()
code
2017076/cell_9
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
visits.info()
visits.describe()
code
2017076/cell_4
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
hpg_reserve.head()
code
2017076/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
reserves['visit_datetime'] = pd.to_datetime(reserves['visit_datetime'])
reserves['reserve_datetime'] = pd.to_datetime(reserves['reserve_datetime'])
print('Number of restaurants from AirREGI = ', str(len(air_reserve['store_id'].unique())))
print('Number of restaurants from hpg = ', str(len(hpg_reserve['store_id'].unique())))
code
2017076/cell_11
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
dates.head()
code
2017076/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
sns.countplot(x='visitors', data=visits)
code
2017076/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Areas:\n')
air_store_info['air_area_name'].unique()
code
2017076/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
hpg_store_info.info()
hpg_store_info.head()
code
2017076/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve.head()
code
2017076/cell_14
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Cuisines:')
air_store_info['air_genre_name'].unique()
code
2017076/cell_10
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
relation.info()
code
2017076/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
reserves['visit_datetime'] = pd.to_datetime(reserves['visit_datetime'])
reserves['reserve_datetime'] = pd.to_datetime(reserves['reserve_datetime'])
reserves.info()
reserves.describe()
code
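The relation table loaded in these cells links the two reservation systems. A small sketch of how it can join the store-info tables, assuming its columns are air_store_id and hpg_store_id as in the Recruit competition data:

# hypothetical join of the two store-info tables through the id relation
stores = relation.merge(air_store_info, on='air_store_id', how='left')
stores = stores.merge(hpg_store_info, on='hpg_store_id', how='left')
print(stores.shape)
stores.head()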
32071161/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72062686/cell_2
[ "text_plain_output_1.png" ]
!pip install dtreeviz
!pip install graphviz
code
72062686/cell_10
[ "text_plain_output_1.png" ]
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
model = DecisionTreeClassifier(random_state=42)
model.fit(iris.data, iris.target)
model.predict(iris.data)
code
72062686/cell_12
[ "text_plain_output_1.png" ]
from dtreeviz.trees import dtreeviz
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
model = DecisionTreeClassifier(random_state=42)
model.fit(iris.data, iris.target)
model.predict(iris.data)
viz = dtreeviz(model, iris.data, iris.target, target_name='target',
               feature_names=iris.feature_names, class_names=list(iris.target_names))
viz.save('regression.svg')
viz
code
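If dtreeviz or graphviz is unavailable, the same fitted tree can be drawn with sklearn's built-in plotting (sklearn >= 0.21); a sketch reusing model and iris from the cell above:

import matplotlib.pyplot as plt
from sklearn.tree import plot_tree

plt.figure(figsize=(14, 8))
plot_tree(model, feature_names=iris.feature_names, class_names=list(iris.target_names), filled=True)
plt.show()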
72104768/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from functools import partial
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import (Dense, Input, Embedding, Reshape, GlobalAveragePooling1D,
                                     Flatten, concatenate, Concatenate, Lambda, Dropout,
                                     SpatialDropout1D, MaxPooling1D, BatchNormalization,
                                     AveragePooling1D, Conv1D, Activation, LeakyReLU)
from tensorflow.keras.optimizers import SGD, Adam, Nadam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.regularizers import l2, l1_l2
from keras.losses import MeanSquaredError
from tensorflow.keras.utils import get_custom_objects
from tabular import gelu, Mish, mish
from tabular import TabularTransformer, DataGenerator
code
72104768/cell_1
[ "text_plain_output_1.png" ]
##### DEEP LEARNING FOR TABULAR DATA ##########################
# The functions used in this Kernel are based on:
# https://github.com/lmassaron/deep_learning_for_tabular_data
# You can watch the full tutorial presented at the DEVFEST 2019
# explaining how to process tabular data with TensorFlow:
# https://www.youtube.com/watch?v=nQgUt_uADSE
################################################################
!wget https://raw.githubusercontent.com/lmassaron/deep_learning_for_tabular_data/master/tabular.py
code
72104768/cell_15
[ "text_plain_output_1.png" ]
from functools import partial
from keras.losses import MeanSquaredError
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from tabular import TabularTransformer, DataGenerator
from tabular import gelu, Mish, mish
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Dense, Flatten, concatenate, Concatenate, Lambda, Dropout, SpatialDropout1D
from tensorflow.keras.layers import Input, Embedding, Reshape, GlobalAveragePooling1D
from tensorflow.keras.layers import MaxPooling1D, BatchNormalization, AveragePooling1D, Conv1D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import SGD, Adam, Nadam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

X = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test = pd.read_csv('../input/30-days-of-ml/test.csv')
y = X.target
X = X.set_index('id').drop('target', axis='columns')
X_test = X_test.set_index('id')
y_stratified = pd.cut(y, bins=10, labels=False)

categoricals = [item for item in X.columns if 'cat' in item]
cat_values = np.unique(X[categoricals].values)
cat_dict = dict(zip(cat_values, range(len(cat_values))))
X[categoricals] = X[categoricals].replace(cat_dict)
X_test[categoricals] = X_test[categoricals].replace(cat_dict)
numeric_variables = [item for item in X.columns if 'cont' in item]
categorical_variables = categoricals
ordinal_variables = categorical_variables

def tabular_dnn(numeric_variables, categorical_variables=None, categorical_counts=None,
                feature_selection_dropout=0.2, categorical_dropout=0.1,
                first_dense=256, second_dense=256, dense_dropout=0.2, activation_type=gelu):
    numerical_inputs = Input(shape=(numeric_variables,))
    numerical_normalization = BatchNormalization()(numerical_inputs)
    numerical_feature_selection = Dropout(feature_selection_dropout)(numerical_normalization)
    if categorical_variables is not None:
        categorical_inputs = []
        categorical_embeddings = []
        for category in categorical_variables:
            categorical_inputs.append(Input(shape=[1], name=category))
            category_counts = categorical_counts[category]
            categorical_embeddings.append(
                Embedding(category_counts + 1, int(np.log1p(category_counts) + 1),
                          name=category + '_embed')(categorical_inputs[-1]))
        categorical_logits = Concatenate(name='categorical_conc')(
            [Flatten()(SpatialDropout1D(categorical_dropout)(cat_emb))
             for cat_emb in categorical_embeddings])
        x = concatenate([numerical_feature_selection, categorical_logits])
    else:
        x = numerical_feature_selection
    x = Dense(first_dense, activation=activation_type)(x)
    x = Dropout(dense_dropout)(x)
    x = Dense(second_dense, activation=activation_type)(x)
    x = Dropout(dense_dropout)(x)
    output = Dense(1)(x)
    if categorical_variables is not None:
        model = Model([numerical_inputs] + categorical_inputs, output)
    else:
        model = Model([numerical_inputs], output)
    return model

# Useful functions
def RMSE(y_true, y_pred):
    return tf.py_function(partial(mean_squared_error, squared=False), (y_true, y_pred), tf.double)

def compile_model(model, loss, metrics, optimizer):
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
    return model

def plot_keras_history(history, measures):
    """
    history: Keras training history
    measures = list of names of measures
    """
    rows = len(measures) // 2 + len(measures) % 2
    fig, panels = plt.subplots(rows, 2, figsize=(15, 5))
    plt.subplots_adjust(top=0.99, bottom=0.01, hspace=0.4, wspace=0.2)
    try:
        panels = [item for sublist in panels for item in sublist]
    except:
        pass
    for k, measure in enumerate(measures):
        panel = panels[k]
        panel.set_title(measure + ' history')
        panel.plot(history.epoch, history.history[measure], label='Train ' + measure)
        panel.plot(history.epoch, history.history['val_' + measure], label='Validation ' + measure)
        panel.set(xlabel='epochs', ylabel=measure)
        panel.legend()
    plt.show(fig)

measure_to_monitor = 'val_RMSE'
modality = 'min'
early_stopping = EarlyStopping(monitor=measure_to_monitor, mode=modality, patience=3, verbose=0)
model_checkpoint = ModelCheckpoint('best.model', monitor=measure_to_monitor, mode=modality,
                                   save_best_only=True, verbose=0)

# Config (folds, seed, batch_size, epochs) comes from a cell that was not extracted
skf = StratifiedKFold(n_splits=Config.folds, shuffle=True, random_state=Config.seed)
score = list()
oof = np.zeros(len(X))
best_iteration = list()
for fold, (train_idx, test_idx) in enumerate(skf.split(X, y_stratified)):
    tb = TabularTransformer(numeric=numeric_variables, ordinal=[], lowcat=[], highcat=categorical_variables)
    tb.fit(X.iloc[train_idx])
    sizes = tb.shape(X.iloc[train_idx])
    categorical_levels = dict(zip(categorical_variables, sizes[1:]))
    model = tabular_dnn(numeric_variables=sizes[0], categorical_variables=categorical_variables,
                        categorical_counts=categorical_levels, feature_selection_dropout=0.0,
                        categorical_dropout=0.0, first_dense=64, second_dense=64,
                        dense_dropout=0.0, activation_type='relu')
    model = compile_model(model, loss='mean_squared_error',
                          metrics=[MeanSquaredError(name='MSE'), RMSE],
                          optimizer=Adam(learning_rate=0.0001))
    train_batch = DataGenerator(X.iloc[train_idx], y[train_idx], tabular_transformer=tb,
                                batch_size=Config.batch_size, shuffle=True)
    history = model.fit(train_batch, validation_data=(tb.transform(X.iloc[test_idx]), y[test_idx]),
                        epochs=Config.epochs, callbacks=[model_checkpoint, early_stopping], verbose=1)
    best_iteration.append(np.argmin(history.history['val_RMSE']) + 1)
    preds = model.predict(tb.transform(X.iloc[test_idx]), verbose=1, batch_size=1024).flatten()
    oof[test_idx] = preds
    score.append(mean_squared_error(y_true=y[test_idx], y_pred=preds, squared=False))

tb = TabularTransformer(numeric=numeric_variables, ordinal=[], lowcat=[], highcat=categorical_variables)
tb.fit(X)
sizes = tb.shape(X)
categorical_levels = dict(zip(categorical_variables, sizes[1:]))
print(f'Input array sizes: {sizes}')
print(f'Categorical levels: {categorical_levels}\n')
model = tabular_dnn(numeric_variables=sizes[0], categorical_variables=categorical_variables,
                    categorical_counts=categorical_levels, feature_selection_dropout=0.0,
                    categorical_dropout=0.0, first_dense=64, second_dense=64,
                    dense_dropout=0.0, activation_type='relu')
model = compile_model(model, loss='mean_squared_error',
                      metrics=[MeanSquaredError(name='MSE'), RMSE],
                      optimizer=Adam(learning_rate=0.0001))
train_batch = DataGenerator(X, y, tabular_transformer=tb, batch_size=Config.batch_size, shuffle=True)
best_epochs = int(np.median(best_iteration))
print(f'Training for {best_epochs} epochs')
history = model.fit(train_batch, epochs=best_epochs, verbose=1)
code
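Both cross-validation cells reference a Config object (folds, seed, batch_size, epochs) whose defining cell was not extracted. A minimal stand-in with placeholder values so the code above can run:

class Config:
    # placeholder values; the original defining cell is not shown
    folds = 5
    seed = 42
    batch_size = 1024
    epochs = 100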
72104768/cell_14
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from functools import partial
from keras.losses import MeanSquaredError
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from tabular import TabularTransformer, DataGenerator
from tabular import gelu, Mish, mish
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Dense, Flatten, concatenate, Concatenate, Lambda, Dropout, SpatialDropout1D
from tensorflow.keras.layers import Input, Embedding, Reshape, GlobalAveragePooling1D
from tensorflow.keras.layers import MaxPooling1D, BatchNormalization, AveragePooling1D, Conv1D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import SGD, Adam, Nadam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

X = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test = pd.read_csv('../input/30-days-of-ml/test.csv')
y = X.target
X = X.set_index('id').drop('target', axis='columns')
X_test = X_test.set_index('id')
y_stratified = pd.cut(y, bins=10, labels=False)

categoricals = [item for item in X.columns if 'cat' in item]
cat_values = np.unique(X[categoricals].values)
cat_dict = dict(zip(cat_values, range(len(cat_values))))
X[categoricals] = X[categoricals].replace(cat_dict)
X_test[categoricals] = X_test[categoricals].replace(cat_dict)
numeric_variables = [item for item in X.columns if 'cont' in item]
categorical_variables = categoricals
ordinal_variables = categorical_variables

def tabular_dnn(numeric_variables, categorical_variables=None, categorical_counts=None,
                feature_selection_dropout=0.2, categorical_dropout=0.1,
                first_dense=256, second_dense=256, dense_dropout=0.2, activation_type=gelu):
    numerical_inputs = Input(shape=(numeric_variables,))
    numerical_normalization = BatchNormalization()(numerical_inputs)
    numerical_feature_selection = Dropout(feature_selection_dropout)(numerical_normalization)
    if categorical_variables is not None:
        categorical_inputs = []
        categorical_embeddings = []
        for category in categorical_variables:
            categorical_inputs.append(Input(shape=[1], name=category))
            category_counts = categorical_counts[category]
            categorical_embeddings.append(
                Embedding(category_counts + 1, int(np.log1p(category_counts) + 1),
                          name=category + '_embed')(categorical_inputs[-1]))
        categorical_logits = Concatenate(name='categorical_conc')(
            [Flatten()(SpatialDropout1D(categorical_dropout)(cat_emb))
             for cat_emb in categorical_embeddings])
        x = concatenate([numerical_feature_selection, categorical_logits])
    else:
        x = numerical_feature_selection
    x = Dense(first_dense, activation=activation_type)(x)
    x = Dropout(dense_dropout)(x)
    x = Dense(second_dense, activation=activation_type)(x)
    x = Dropout(dense_dropout)(x)
    output = Dense(1)(x)
    if categorical_variables is not None:
        model = Model([numerical_inputs] + categorical_inputs, output)
    else:
        model = Model([numerical_inputs], output)
    return model

# Useful functions
def RMSE(y_true, y_pred):
    return tf.py_function(partial(mean_squared_error, squared=False), (y_true, y_pred), tf.double)

def compile_model(model, loss, metrics, optimizer):
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
    return model

def plot_keras_history(history, measures):
    """
    history: Keras training history
    measures = list of names of measures
    """
    rows = len(measures) // 2 + len(measures) % 2
    fig, panels = plt.subplots(rows, 2, figsize=(15, 5))
    plt.subplots_adjust(top=0.99, bottom=0.01, hspace=0.4, wspace=0.2)
    try:
        panels = [item for sublist in panels for item in sublist]
    except:
        pass
    for k, measure in enumerate(measures):
        panel = panels[k]
        panel.set_title(measure + ' history')
        panel.plot(history.epoch, history.history[measure], label='Train ' + measure)
        panel.plot(history.epoch, history.history['val_' + measure], label='Validation ' + measure)
        panel.set(xlabel='epochs', ylabel=measure)
        panel.legend()
    plt.show(fig)

measure_to_monitor = 'val_RMSE'
modality = 'min'
early_stopping = EarlyStopping(monitor=measure_to_monitor, mode=modality, patience=3, verbose=0)
model_checkpoint = ModelCheckpoint('best.model', monitor=measure_to_monitor, mode=modality,
                                   save_best_only=True, verbose=0)

# Config (folds, seed, batch_size, epochs) comes from a cell that was not extracted
skf = StratifiedKFold(n_splits=Config.folds, shuffle=True, random_state=Config.seed)
score = list()
oof = np.zeros(len(X))
best_iteration = list()
for fold, (train_idx, test_idx) in enumerate(skf.split(X, y_stratified)):
    tb = TabularTransformer(numeric=numeric_variables, ordinal=[], lowcat=[], highcat=categorical_variables)
    tb.fit(X.iloc[train_idx])
    sizes = tb.shape(X.iloc[train_idx])
    categorical_levels = dict(zip(categorical_variables, sizes[1:]))
    model = tabular_dnn(numeric_variables=sizes[0], categorical_variables=categorical_variables,
                        categorical_counts=categorical_levels, feature_selection_dropout=0.0,
                        categorical_dropout=0.0, first_dense=64, second_dense=64,
                        dense_dropout=0.0, activation_type='relu')
    model = compile_model(model, loss='mean_squared_error',
                          metrics=[MeanSquaredError(name='MSE'), RMSE],
                          optimizer=Adam(learning_rate=0.0001))
    train_batch = DataGenerator(X.iloc[train_idx], y[train_idx], tabular_transformer=tb,
                                batch_size=Config.batch_size, shuffle=True)
    history = model.fit(train_batch, validation_data=(tb.transform(X.iloc[test_idx]), y[test_idx]),
                        epochs=Config.epochs, callbacks=[model_checkpoint, early_stopping], verbose=1)
    best_iteration.append(np.argmin(history.history['val_RMSE']) + 1)
    preds = model.predict(tb.transform(X.iloc[test_idx]), verbose=1, batch_size=1024).flatten()
    oof[test_idx] = preds
    score.append(mean_squared_error(y_true=y[test_idx], y_pred=preds, squared=False))

print('Average RMSE %0.3f ± %0.3f' % (np.mean(score), np.std(score)))
print('RMSE OOF %0.3f' % mean_squared_error(y_true=y, y_pred=oof, squared=False))
code
88086649/cell_21
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
X = df.drop('Weight', axis=1)
y = df['Weight']
(X.shape, y.shape)
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
transformed_X = ct.fit_transform(X, y)
transformed_X[0]
code
88086649/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
df['Species'].value_counts()
code
88086649/cell_25
[ "image_output_1.png" ]
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
code
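From cell_25 onward the fish-market cells use X_train/X_test/y_train/y_test, but the split cell was not extracted. A minimal sketch, assuming a standard sklearn split on transformed_X from cell_21/cell_22 (test_size and random_state are guesses):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(transformed_X, y, test_size=0.2, random_state=42)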
88086649/cell_30
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

# X_train/X_test/y_train/y_test come from a train_test_split cell that is not shown here
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
y_pred[:10]
code
88086649/cell_33
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
print('R2 Score', r2_score(y_test, y_pred))
code
88086649/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.describe()
code
88086649/cell_40
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_log_error, r2_score

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
reg = RandomForestRegressor(n_estimators=200)
reg.fit(X_train, y_train)
y_pred_reg = reg.predict(X_test)
print('Mean Squared Log Error', mean_squared_log_error(y_test, y_pred_reg))
print('R2 Score', r2_score(y_test, y_pred_reg))
code
88086649/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
code
88086649/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.info()
code
88086649/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
X = df.drop('Weight', axis=1)
y = df['Weight']
(X.shape, y.shape)
(type(X), type(y))
code
88086649/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
code
88086649/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
sns.boxplot(x='Species', y='Weight', data=df)
code
88086649/cell_38
[ "text_plain_output_1.png" ]
import numpy as np

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
np.array(y_test)
np.array(y_test)
code
88086649/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
X = df.drop('Weight', axis=1)
y = df['Weight']
(X.shape, y.shape)
code
88086649/cell_35
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
reg = RandomForestRegressor(n_estimators=200)
reg.fit(X_train, y_train)
code
88086649/cell_31
[ "text_plain_output_1.png" ]
import numpy as np

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
np.array(y_test)
code
88086649/cell_22
[ "image_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
X = df.drop('Weight', axis=1)
y = df['Weight']
(X.shape, y.shape)
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
transformed_X = ct.fit_transform(X, y)
transformed_X
code
88086649/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
df['Species'].value_counts().plot.bar()
code
88086649/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
code
88086649/cell_37
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor

(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
reg = RandomForestRegressor(n_estimators=200)
reg.fit(X_train, y_train)
y_pred_reg = reg.predict(X_test)
y_pred_reg
code
88086649/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/fish-market/Fish.csv')
df.isna().sum()
corr = df.corr()
corr
plt.figure(figsize=(11, 8))
sns.heatmap(corr, cmap='Greens', annot=True)
plt.show()
code
88086649/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fish-market/Fish.csv')
df.head()
code
130011577/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (
    thai_accident_df['accident_date'].dt.year,
    thai_accident_df['accident_date'].dt.month,
    thai_accident_df['accident_date'].dt.day)

def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(),
                          to_date=thai_accident_df['accident_date'].max()):
    df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) &
                          (thai_accident_df['accident_date'] < to_date)]
    return df

counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
print(counts.tail())
plt.figure(figsize=(14, 7))
sns.lineplot(data=counts, x='month', y='count', hue='year')
plt.show()
code
130011577/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
print(thai_accident_df.shape)
code
130011577/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
print(thai_accident_df.isnull().sum())
thai_accident_df.describe().T
code
130011577/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df.tail()
code
130011577/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
    df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
    return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
gender_count = thai_accident_df['gender'].value_counts().reset_index()
gender_count.columns = ['gender', 'g_count']
gender_count['%'] = gender_count['g_count'] / gender_count['g_count'].sum() * 100
print('## Accidents by year ##')
year_df = thai_accident_df['accident_date'].dt.year.value_counts().reset_index(name='c')
year_df.columns = ['year', 'count']
plt.title('All accidents occurring each year')
sns.barplot(data=year_df, x='year', y='count')
plt.ylabel('count')
plt.xlabel('Year')
plt.show()
plt.title('Accident counts by year (line plot)')
sns.lineplot(data=year_df, x='year', y='count')
plt.ylabel('count')
plt.xlabel('Year')
plt.show()
plt.figure(figsize=(14, 7))
plt.title('Using a histogram')
sns.histplot(data=thai_accident_df['year'], discrete=True, element='step')
plt.show()
print('Describe accidents by year')
print(thai_accident_df['year'].value_counts().describe())
code
130011577/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130011577/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
    df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
    return df
print('Ready for Data visualization')
code
130011577/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
print(f'Check datatypes\n{df.dtypes}')
print('\nShape check')
print(df.shape)
print()
print(df.isnull().sum())
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
code
130011577/cell_10
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
    df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
    return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
print('Simple data below\n')
print(f"All accident data from {thai_accident_df['accident_date'].min().date()} to {thai_accident_df['accident_date'].max().date()} \n{thai_accident_df.shape[0]} cases\n")
print('# By Gender')
gender_count = thai_accident_df['gender'].value_counts().reset_index()
gender_count.columns = ['gender', 'g_count']
gender_count['%'] = gender_count['g_count'] / gender_count['g_count'].sum() * 100
print(gender_count)
print('\n# By Vehicle type')
print(thai_accident_df['vehicle_type'].value_counts())
print('\n# By province')
print(thai_accident_df['province_en'].value_counts())
print(thai_accident_df['province_en'].value_counts().describe())
code
130011577/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
    df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
    return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
gender_count = thai_accident_df['gender'].value_counts().reset_index()
gender_count.columns = ['gender', 'g_count']
gender_count['%'] = gender_count['g_count'] / gender_count['g_count'].sum() * 100
year_df = thai_accident_df['accident_date'].dt.year.value_counts().reset_index(name='c')
year_df.columns = ['year', 'count']
print('Try......')
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
fig.suptitle('1st try subplots', y=1.05)
sns.histplot(ax=ax[0], x=thai_accident_df['year'], discrete=True)
ax[0].set_title('Plot using histogram', y=1.05)
ax[0].bar_label(ax[0].containers[1], rotation=45)
sns.barplot(ax=ax[1], data=year_df, x='year', y='count')
ax[1].set_title('Plot using barplot', y=1.05)
ax[1].bar_label(ax[1].containers[0], rotation=35)
ax[1].set_xticklabels(ax[1].get_xticklabels(), rotation=45)
plt.show()
code
130011577/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
code
16127029/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/loan.csv', low_memory=False)
data = data[(data.loan_status == 'Fully Paid') | (data.loan_status == 'Default')]
data['target'] = data.loan_status == 'Fully Paid'
data.shape
x = data.loan_amnt
term_groups = data.groupby('term')
term_groups['int_rate'].mean()
grade_groups = data.groupby('grade')
grade_groups['int_rate'].mean()
total_loaned = grade_groups['funded_amnt'].sum()
print(total_loaned)
code
16127029/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/loan.csv', low_memory=False)
data = data[(data.loan_status == 'Fully Paid') | (data.loan_status == 'Default')]
data['target'] = data.loan_status == 'Fully Paid'
data.shape
x = data.loan_amnt
term_groups = data.groupby('term')
term_groups['int_rate'].mean()
term_groups['int_rate'].std()
code
16127029/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/loan.csv', low_memory=False)
data = data[(data.loan_status == 'Fully Paid') | (data.loan_status == 'Default')]
data['target'] = data.loan_status == 'Fully Paid'
data.shape
x = data.loan_amnt
term_groups = data.groupby('term')
term_groups['int_rate'].mean()
grade_groups = data.groupby('grade')
grade_groups['int_rate'].mean()
X = pd.get_dummies(data[['term', 'verification_status', 'purpose', 'policy_code', 'loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'emp_length', 'addr_state']])
count_class_0, count_class_1 = data.target.value_counts()
data_class_0 = data[data['target'] == 1]
data_class_1 = data[data['target'] == 0]
data_class_0_under = data_class_0.sample(count_class_1)
data_test_under = pd.concat([data_class_0_under, data_class_1], axis=0)
data_class_1_over = data_class_1.sample(count_class_0, replace=True)
data_test_over = pd.concat([data_class_0, data_class_1_over], axis=0)
print('Random over-sampling:')
print(data_test_over.target.value_counts())
data_test_over.target.value_counts().plot(kind='bar', title='Count (target)')
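# --- Added sketch (not in the original notebook): sklearn.utils.resample
# performs the same over-sampling, with random_state for reproducibility.
from sklearn.utils import resample
data_class_1_over = resample(data_class_1, replace=True, n_samples=count_class_0, random_state=42)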
code
16127029/cell_20
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/loan.csv', low_memory=False)
data = data[(data.loan_status == 'Fully Paid') | (data.loan_status == 'Default')]
data['target'] = data.loan_status == 'Fully Paid'
data.shape
x = data.loan_amnt
term_groups = data.groupby('term')
term_groups['int_rate'].mean()
grade_groups = data.groupby('grade')
grade_groups['int_rate'].mean()
X = pd.get_dummies(data[['term', 'verification_status', 'purpose', 'policy_code', 'loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'emp_length', 'addr_state']])
X.shape
y = data['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
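# --- Added sketch (not in the original notebook): with an imbalanced target,
# passing stratify=y keeps the class ratio identical in both splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42, stratify=y)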
code
16127029/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/loan.csv', low_memory=False)
data = data[(data.loan_status == 'Fully Paid') | (data.loan_status == 'Default')]
data['target'] = data.loan_status == 'Fully Paid'
data.shape
print(f"The mean of loan amount: {data['loan_amnt'].mean()}")
print(f"The median of loan amount: {data['loan_amnt'].median()}")
print(f"The maximum of loan amount: {data['loan_amnt'].max()}")
print(f"The standard deviation of loan amount: {data['loan_amnt'].std()}")
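# --- Added sketch (not in the original notebook): describe() reports the
# same summary statistics (plus quartiles) in a single call.
print(data['loan_amnt'].describe())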
code