path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1: "code") |
---|---|---|---|
122249691/cell_14 | [
"text_plain_output_1.png"
] | from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Flatten
import PIL
import matplotlib.pyplot as plt
import pathlib
import tensorflow as tf
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
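# image_dataset_from_directory infers labels from sub-folder names; using the same
# seed with subset='training' and subset='validation' keeps the 80/20 split disjoint.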
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
resnet_model = Sequential()
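# Transfer learning: load an ImageNet-pretrained ResNet50 without its classifier head
# (include_top=False; the classes argument is ignored in that case) and with global
# average pooling, so the base emits one 2048-d feature vector per image. Freezing
# its layers means only the new Dense head below is trained.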
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
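# sparse_categorical_crossentropy takes integer class ids, which is what
# image_dataset_from_directory yields, so the labels need no one-hot encoding.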
resnet_model.compile(optimizer=Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 10
history = resnet_model.fit(train_ds, validation_data=val_ds, epochs=epochs)
fig1 = plt.gcf()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.axis(ymin=0.4, ymax=1)
plt.grid()
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'validation'])
plt.show() | code |
122249691/cell_10 | [
"text_plain_output_1.png"
] | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
import PIL
import pathlib
import tensorflow as tf
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax')) | code |
122249691/cell_5 | [
"image_output_1.png"
] | import PIL
import pathlib
import tensorflow as tf
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
print(roses[0])
PIL.Image.open(str(roses[0])) | code |
90142598/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [0]], 'float32')
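# XOR targets: the classic non-linearly-separable problem, which is why the
# hidden ReLU layer is needed; 10 epochs is usually far too few to fit it.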
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=10)
scores = model.evaluate(training_data, target_data)
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [1]], 'float32')
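# Same architecture retrained on the OR truth table, which is linearly separable.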
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=500)
scores = model.evaluate(training_data, target_data)
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [0], [0], [1]], 'float32')
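# And once more on the AND truth table, also linearly separable.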
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=250)
scores = model.evaluate(training_data, target_data)
print('\n%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
print(model.predict(training_data).round()) | code |
90142598/cell_3 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [0]], 'float32')
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=10)
scores = model.evaluate(training_data, target_data)
print('\n%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
print(model.predict(training_data).round()) | code |
90142598/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [0]], 'float32')
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=10)
scores = model.evaluate(training_data, target_data)
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [1]], 'float32')
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=500)
scores = model.evaluate(training_data, target_data)
print('\n%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
print(model.predict(training_data).round()) | code |
18116047/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
import seaborn as sns
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.figure(figsize=(20, 10))
sns.countplot(x='age', data=hosp, palette='bwr')
plt.title('Distribution of Age')
plt.xticks(rotation=90)
plt.show() | code |
18116047/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.describe(hosp.age) | code |
18116047/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes | code |
18116047/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp.head(5) | code |
18116047/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.info() | code |
18116047/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as scipy
from scipy import stats
print(os.listdir('../input')) | code |
18116047/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp['AdmitDiagnosis'].unique().shape | code |
18116047/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp['age'].unique().shape | code |
18116047/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
import seaborn as sns
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
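# chisquare with no expected frequencies treats the age values as observed
# frequencies and tests them against a uniform expectation.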
scipy.stats.chisquare(hosp.age) | code |
18116047/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.describe() | code |
18116047/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
import seaborn as sns
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
plt.figure(figsize=(20, 20))
sns.heatmap(cbar=False, annot=True, data=hosp.corr(numeric_only=True) * 100, cmap='coolwarm')
plt.title('% Correlation Matrix')
plt.show() | code |
18116047/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
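# scipy.stats.kurtosis returns Fisher (excess) kurtosis: 0 for a normal distribution.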
scipy.stats.kurtosis(hosp.age) | code |
18116047/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum() | code |
18116047/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape | code |
1005822/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
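# cross_val_score negates the MSE ('neg_mean_squared_error') so that higher is better.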
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
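# Bin the continuous columns into 20 equal-width intervals: integer labels 0-19 for
# last_evaluation, the interval labels themselves for average_montly_hours.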
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
"""
binning_range:
[(95.786, 106.7] < (106.7, 117.4] < (117.4, 128.1] < (128.1, 138.8] < ... <
(267.2, 277.9] < (277.9, 288.6] < (288.6, 299.3] < (299.3, 310]]
"""
sns.pointplot(x=df['number_project'], y=df['satisfaction_level']) | code |
1005822/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
plt.scatter(df['satisfaction_level'], df['average_montly_hours'])
plt.ylabel('average_montly_hours')
plt.xlabel('satisfaction_level') | code |
1005822/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any() | code |
1005822/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
sns.pointplot(x=df1['average_montly_hours'], y=df1['satisfaction_level'])
'\nbinning_range:\n[(95.786, 106.7] < (106.7, 117.4] < (117.4, 128.1] < (128.1, 138.8] < ... <\n (267.2, 277.9] < (277.9, 288.6] < (288.6, 299.3] < (299.3, 310]]\n' | code |
1005822/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
sns.heatmap(df.corr(numeric_only=True), vmax=0.8, square=True, annot=True, fmt='.2f')
1005822/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
    print(results.mean()) | code |
1005822/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
sns.pointplot(x=df1['last_evaluation'], y=df['average_montly_hours']) | code |
1005822/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
sns.pointplot(x=df['number_project'], y=df['last_evaluation']) | code |
1005822/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True)) | code |
1005822/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.pointplot(x=df['number_project'], y=df['average_montly_hours']) | code |
1005822/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
df1['average_montly_hours'].head() | code |
1005822/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.describe() | code |
1005822/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
plt.scatter(df['satisfaction_level'], df['last_evaluation'])
plt.xlabel('satisfaction_level')
plt.ylabel('last_evaluation') | code |
1005822/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20)
projects = df['number_project'].unique()
projects = sorted(projects)
for i in projects:
    mean_satisfaction_level = df['satisfaction_level'][df['number_project'] == i].mean()
    print('project_total', i, ':', mean_satisfaction_level) | code |
1005822/cell_10 | [
"text_html_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.barplot(x=df['left'], y=df['satisfaction_level']) | code |
1005822/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', axis=1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
result_list = []
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], axis=1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True)) | code |
128000273/cell_42 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
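# OneHotEncoder sorts categories alphabetically, so encoder_home columns 0/1/2 are
# Earth/Europa/Mars and encoder_dest columns 0/1/2 are 55 Cancri e / PSO J318.5-22 /
# TRAPPIST-1e (a NaN category, if present, sorts last).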
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
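# Impute missing numeric values with each column's median, which is robust to outliers.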
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
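# Fit the given model on 10 random 70/30 splits, pickle whichever fit scores best on
# its held-out 30%, and return that best model reloaded from disk.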
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelGB = train_model(GradientBoostingClassifier(n_estimators=175)) | code |
128000273/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
dtrain.describe() | code |
128000273/cell_9 | [
"image_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum() | code |
128000273/cell_34 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelRF = train_model(RandomForestClassifier(bootstrap=True, random_state=0, n_estimators=20, criterion='entropy')) | code |
128000273/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
dtrain.head() | code |
128000273/cell_44 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
dtest.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
le = preprocessing.LabelEncoder()
dtest['VIP'] = le.fit_transform(dtest['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtest[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtest[['Destination']]).toarray())
dtest['Earth'] = encoder_home[0]
dtest['Europa'] = encoder_home[1]
dtest['Mars'] = encoder_home[2]
dtest['CANCRI'] = encoder_dest[0]
dtest['PSO'] = encoder_dest[1]
dtest['TRAPPIST'] = encoder_dest[2]
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelGB = train_model(GradientBoostingClassifier(n_estimators=175))
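# Horizontal bar chart of the boosted model's impurity-based feature importances.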
pd.Series(modelGB.feature_importances_, index=X.columns).sort_values().plot.barh() | code |
128000273/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
print(dtrain['Destination'].unique()) | code |
128000273/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtest.head() | code |
128000273/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
dtrain.info() | code |
128000273/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
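# Pie-slice formatter: label each wedge with its percentage and absolute count.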
def func(pct, allvals):
    absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
    return f'{pct:.1f}%\n({absolute:d})'
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('VIP').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('VIP').Transported.count()
plt.pie(transported_count, autopct=lambda pct: func(pct, transported_count), textprops=dict(color='w'))
plt.title('VIP that were transported', fontsize=16)
print(transported_count) | code |
128000273/cell_45 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
dtest.isna().sum()
def func(pct, allvals):
    absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
    return f'{pct:.1f}%\n({absolute:d})'
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('VIP').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('VIP').Transported.count()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
le = preprocessing.LabelEncoder()
dtest['VIP'] = le.fit_transform(dtest['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtest[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtest[['Destination']]).toarray())
dtest['Earth'] = encoder_home[0]
dtest['Europa'] = encoder_home[1]
dtest['Mars'] = encoder_home[2]
dtest['CANCRI'] = encoder_dest[0]
dtest['PSO'] = encoder_dest[1]
dtest['TRAPPIST'] = encoder_dest[2]
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelGB = train_model(GradientBoostingClassifier(n_estimators=175))
pd.Series(modelGB.feature_importances_, index=X.columns).sort_values().plot.barh()
rows = zip(dtrain['Age'], dtrain['Earth'], dtrain['Europa'], dtrain['Mars'], dtrain['ShoppingMall'], dtrain['Spa'], dtrain['VRDeck'], dtrain['FoodCourt'], dtrain['RoomService'], dtrain['CANCRI'], dtrain['PSO'], dtrain['TRAPPIST'], dtrain['Transported'])
data = pd.DataFrame(rows)
corrmat = data.corr()
plt.figure(figsize=(10, 10))
sns.heatmap(data=corrmat, annot=True, cmap='RdYlGn', xticklabels=['Age', 'Earth', 'Europa', 'Mars', 'ShoppingMall', 'Spa', 'VRDeck', 'FoodCourt', 'RoomService', 'CANCRI', 'PSO', 'TRAPPIST', 'Transported'], yticklabels=['Age', 'Earth', 'Europa', 'Mars', 'ShoppingMall', 'Spa', 'VRDeck', 'FoodCourt', 'RoomService', 'CANCRI', 'PSO', 'TRAPPIST', 'Transported'])
plt.show() | code |
128000273/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
def func(pct, allvals):
    absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
    return f'{pct:.1f}%\n({absolute:d})'
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('VIP').Transported.count()
plt.pie(transported_count, autopct=lambda pct: func(pct, transported_count), textprops=dict(color='w'))
plt.title('VIP that were not transported', fontsize=16)
print(transported_count) | code |
128000273/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pickle
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelDT = train_model(DecisionTreeClassifier(criterion='entropy', random_state=0)) | code |
128000273/cell_8 | [
"image_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
print('Shape of train data: ', dtrain.shape)
print('Shape of test data: ', dtest.shape) | code |
128000273/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
def func(pct, allvals):
    absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
    return f'{pct:.1f}%\n({absolute:d})'
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('HomePlanet').Transported.count()
plt.pie(transported_count, autopct=lambda pct: func(pct, transported_count), textprops=dict(color='w'))
plt.title('Home planet of the ones that were not transported', fontsize=16)
print(transported_count) | code |
128000273/cell_38 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import pickle
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
median_age = dtrain.Age.median()
dtrain['Age'].fillna(median_age, inplace=True)
median_mall = dtrain.ShoppingMall.median()
dtrain['ShoppingMall'].fillna(median_mall, inplace=True)
median_spa = dtrain.Spa.median()
dtrain['Spa'].fillna(median_spa, inplace=True)
median_vr = dtrain.VRDeck.median()
dtrain['VRDeck'].fillna(median_vr, inplace=True)
median_room = dtrain.RoomService.median()
dtrain['RoomService'].fillna(median_room, inplace=True)
median_vip = dtrain.VIP.median()
dtrain['VIP'].fillna(median_vip, inplace=True)
median_food = dtrain.FoodCourt.median()
dtrain['FoodCourt'].fillna(median_food, inplace=True)
predict = dtrain['Transported']
X = dtrain[['Age', 'Earth', 'Mars', 'Europa', 'ShoppingMall', 'Spa', 'RoomService', 'VRDeck', 'FoodCourt', 'CANCRI', 'TRAPPIST', 'PSO']]
y = predict
def train_model(model_used):
    best = 0
    sum = 0
    counter = 0
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    for i in range(10):
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
        model = model_used
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        sum += acc
        counter += 1
        if acc > best:
            best = acc
            with open('titanic.pickle', 'wb') as file:
                pickle.dump(model, file)
    with open('titanic.pickle', 'rb') as file:
        model_trained = pickle.load(file)
    return model_trained
modelKNN = train_model(KNeighborsClassifier(n_neighbors=25)) | code |
128000273/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
128000273/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
def func(pct, allvals):
    absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
    return f'{pct:.1f}%\n({absolute:d})'
transported = dtrain[dtrain['Transported'] == False]
transported_count = transported.groupby('HomePlanet').Transported.count()
transported = dtrain[dtrain['Transported'] == True]
transported_count = transported.groupby('HomePlanet').Transported.count()
plt.pie(transported_count, autopct=lambda pct: func(pct, transported_count), textprops=dict(color='w'))
plt.title('Home planet of the ones that were transported', fontsize=16)
print(transported_count) | code |
128000273/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtrain.isna().sum()
dtest.isna().sum()
le = preprocessing.LabelEncoder()
dtrain['VIP'] = le.fit_transform(dtrain['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtrain[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtrain[['Destination']]).toarray())
dtrain['Earth'] = encoder_home[0]
dtrain['Europa'] = encoder_home[1]
dtrain['Mars'] = encoder_home[2]
dtrain['CANCRI'] = encoder_dest[0]
dtrain['PSO'] = encoder_dest[1]
dtrain['TRAPPIST'] = encoder_dest[2]
le = preprocessing.LabelEncoder()
dtest['VIP'] = le.fit_transform(dtest['VIP'])
oe = preprocessing.OneHotEncoder(handle_unknown='ignore')
encoder_home = pd.DataFrame(oe.fit_transform(dtest[['HomePlanet']]).toarray())
encoder_dest = pd.DataFrame(oe.fit_transform(dtest[['Destination']]).toarray())
dtest['Earth'] = encoder_home[0]
dtest['Europa'] = encoder_home[1]
dtest['Mars'] = encoder_home[2]
dtest['CANCRI'] = encoder_dest[0]
dtest['PSO'] = encoder_dest[1]
dtest['TRAPPIST'] = encoder_dest[2]
dtest.head() | code |
128000273/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtest.isna().sum()
dtest.describe() | code |
128000273/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtest.isna().sum() | code |
128000273/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtest = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
dtest.isna().sum()
dtest.info() | code |
128000273/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
dtrain = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
dtrain.head() | code |
1007003/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
import pandas
TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
train = pandas.read_csv(TRAIN_PATH)
test = pandas.read_csv(TEST_PATH) | code |
33115465/cell_5 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"image_output_17.png",
"text_plain_output_9.png",
"image_output_14.png",
"image_output_23.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"image_output_18.png",
"image_output_21.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"image_output_20.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_8.png",
"image_output_27.png",
"image_output_6.png",
"image_output_12.png",
"image_output_22.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
] | from matplotlib.ticker import MaxNLocator
from scipy.stats import gaussian_kde
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as st
def normalize(X, x_min=0, x_max=1):
nom = (X - X.min(axis=0)) * (x_max - x_min)
denom = X.max(axis=0) - X.min(axis=0)
denom[denom == 0] = 1
return x_min + nom / denom
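# BilinearMap learns a linear projection X -> Z with `target_n` output columns by solving an
# independent least-squares problem on each disjoint block of n_features rows, then stacking
# the per-block coefficient vectors into the projection matrix.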
class BilinearMap:
def __init__(self, target_n):
self.target_cols = target_n
def compute_coeff(self, X, y):
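        # Solve the normal equations (X^T X)^{-1} X^T y directly; fall back to sklearn's LinearRegression if X^T X is singular.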
try:
Xt = np.transpose(X)
Xp = np.dot(Xt, X)
Xpi = np.linalg.inv(Xp)
XpiXt = np.dot(Xpi, Xt)
coeff = np.dot(XpiXt, y)
except Exception as e:
regressor = LinearRegression(fit_intercept=False)
regressor.fit(X, y)
coeff = regressor.coef_
return coeff
def fit_transform(self, X, y):
target_rows = X.shape[1]
actual_rows = X.shape[0]
required_rows = target_rows * self.target_cols
if actual_rows < required_rows:
assert False, f'{required_rows} rows are required, {actual_rows} are provided'
Y = []
for i in range(self.target_cols):
start = i * target_rows
end = start + target_rows
coeff = self.compute_coeff(X[start:end, :], y[start:end])
Y.extend(coeff.tolist())
Y = np.array(Y)
Y = Y.reshape(target_rows, self.target_cols)
Z = np.dot(X, Y)
return Z
def print_input_files():
pass
def dump_text_file(fname):
pass
def dump_csv_file(fname, count=5):
df = pd.read_csv(fname)
if count < 0:
count = df.shape[0]
    return df  # callers such as get_nbaiot_device_files rely on the DataFrame being returned
ds_nbaiot = '/kaggle/input/nbaiot-dataset'
dn_nbaiot = ['Danmini_Doorbell', 'Ecobee_Thermostat', 'Ennio_Doorbell', 'Philips_B120N10_Baby_Monitor', 'Provision_PT_737E_Security_Camera', 'Provision_PT_838_Security_Camera', 'Samsung_SNH_1011_N_Webcam', 'SimpleHome_XCS7_1002_WHT_Security_Camera', 'SimpleHome_XCS7_1003_WHT_Security_Camera']
def fname(ds, f):
if '.csv' not in f:
f = f'{f}.csv'
return os.path.join(ds, f)
def fname_nbaiot(f):
return fname(ds_nbaiot, f)
def get_nbaiot_device_files():
nbaiot_all_files = dump_csv_file(fname_nbaiot('data_summary'), -1)
nbaiot_all_files = nbaiot_all_files.iloc[:, 0:1].values
device_id = 1
indices = []
for j in range(len(nbaiot_all_files)):
if str(device_id) not in str(nbaiot_all_files[j]):
indices.append(j)
device_id += 1
nbaiot_device_files = np.split(nbaiot_all_files, indices)
return nbaiot_device_files
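# Load one device's traffic: benign rows get label 1 and attack rows label 0; count_norm/count_anom cap how many rows are read per file.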
def get_nbaiot_device_data(device_id, count_norm=-1, count_anom=-1):
if device_id < 1 or device_id > 9:
assert False, 'Please provide a valid device ID 1-9, both inclusive'
if count_anom == -1:
count_anom = count_norm
device_index = device_id - 1
device_files = get_nbaiot_device_files()
device_file = device_files[device_index]
df = pd.DataFrame()
y = []
for i in range(len(device_file)):
fname = str(device_file[i][0])
df_c = pd.read_csv(fname_nbaiot(fname))
count = count_anom
if 'benign' in fname:
count = count_norm
rows = count if count >= 0 else df_c.shape[0]
y_np = np.ones(rows) if 'benign' in fname else np.zeros(rows)
y.extend(y_np.tolist())
df = pd.concat([df.iloc[:, :].reset_index(drop=True), df_c.iloc[:rows, :].reset_index(drop=True)], axis=0)
X = df.iloc[:, :].values
y = np.array(y)
return (X, y)
def get_nbaiot_devices_data():
devices_data = []
for i in range(9):
device_id = i + 1
X, y = get_nbaiot_device_data(device_id)
devices_data.append((X, y))
return devices_data
# Visualization Functions
def plot_scatter_nbaiot_device(device_data, device_id, dim3=True):
if device_id < 1 or device_id > 9:
assert False, "Please provide a valid device ID 1-9, both inclusive"
device_index = device_id-1
print("scatter plot for", dn_nbaiot[device_index])
(X, y) = device_data
X_std = StandardScaler().fit_transform(X)
#bmap = BilinearMap(target_n = 2)
#X_bmap = bmap.fit_transform(X_std, y)
bmap = PCA(n_components=2)
X_bmap = bmap.fit_transform(X_std)
print("X_bmap.shape:", X_bmap.shape, "X_std.shape:", X_std.shape)
data_X = X_bmap[:,0]
data_Y = X_bmap[:,1]
data_Z = y
data = np.column_stack((data_X, data_Y, data_Z))
#if dim3:
plot_3d_scatter(data, dn_nbaiot[device_index], 'PCA1', 'PCA2', 'Normal or Anomalous')
#else:
normal = mpatches.Patch(color='green', label='N')
anomalous = mpatches.Patch(color='red', label='A')
handles = [normal, anomalous]
plot_2d_scatter(data, dn_nbaiot[device_index], 'PCA1', 'PCA2', handles)
def plot_surface_nbaiot_device(device_data, device_id):
if device_id < 1 or device_id > 9:
assert False, "Please provide a valid device ID 1-9, both inclusive"
device_index = device_id-1
print("scatter plot for", dn_nbaiot[device_index])
(X, y) = device_data
X_std = StandardScaler().fit_transform(X)
#bmap = BilinearMap(target_n = 3)
#X_bmap = bmap.fit_transform(X_std, y)
bmap = PCA(n_components=2)
X_bmap = bmap.fit_transform(X_std)
print("X_bmap.shape:", X_bmap.shape, "X_std.shape:", X_std.shape)
plot_3d_scatter_surface(X_bmap, dn_nbaiot[device_index], 'PCA1', 'PCA2', 'PCA3')
########################################################################
# Visualization related functions
def plot_3d_histogram(data):
cols = data.shape[1]
if cols < 2:
assert False, 'The number of columns should be 2'
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = data[:,0]
Y = data[:,1]
bins = 10
hist, xedges, yedges = np.histogram2d(X, Y, bins=bins, range=[[0, bins*0.6], [0, bins*0.6]])
# Construct arrays for the anchor positions of the bars.
xpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25, indexing="ij")
xpos = xpos.ravel()
ypos = ypos.ravel()
zpos = 0
# Construct arrays with the dimensions for the 16 bars.
dx = dy = 0.5 * np.ones_like(zpos)
dz = hist.ravel()
cmap = cm.get_cmap('cool')
max_height = np.max(dz)
min_height = np.min(dz)
rgba = [cmap((k-min_height)/max_height) for k in dz]
ax.bar3d(xpos, ypos, zpos, dx, dy, dz, zsort='average', color=rgba)
plt.show()
def plot_3d_surface(data, func):
cols = data.shape[1]
if cols < 2:
assert False, 'The number of columns should be 2'
X = data[:,0]
Y = data[:,1]
X, Y = np.meshgrid(X, Y)
Z = func(X, Y)
#print(Z.shape)
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
ax.set_title('surface');
def plot_3d_scatter(data, title=None, xlabel=None, ylabel=None, zlabel=None):
cols = data.shape[1]
if cols < 3:
assert False, 'The number of columns should be 3'
X = data[:,0]
Y = data[:,1]
Z = data[:,2]
ax = plt.axes(projection='3d')
ax.scatter(X, Y, Z, c = Z, cmap='RdYlGn')
ax.set_title(title);
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
plt.show()
def plot_3d_scatter_trisurf(data, title=None, xlabel=None, ylabel=None, zlabel=None):
cols = data.shape[1]
if cols < 3:
assert False, 'The number of columns should be 3'
X = data[:,0]
Y = data[:,1]
Z = data[:,2]
'''Xmin = int(np.floor(np.amin(X)))
Xmax = int(np.ceil(np.amax(X)))
Ymin = int(np.floor(np.amin(Y)))
Ymax = int(np.ceil(np.amax(Y)))
print("extrems:", Xmin, Xmax, Ymin, Ymax)
sqmin = min(Xmin, Ymin)
sqmax = max(Xmax, Ymax)
print("sq min/max:", sqmin, sqmax)
x = range(sqmin, sqmax)
y = range(sqmin, sqmax)
XX, YY = np.meshgrid(x, y)
ZZ = np.zeros_like(XX)
dim = X.shape[0]
print('dim:', dim)
Xi = X.astype(int)
Yi = Y.astype(int)
#print('Xi', Xi, 'Yi', Yi, 'X', X, 'Y', Y)
for i in range(dim):
row = Xi[i]
col = Yi[i]
val = 50 #Z[i]
#print("row, col, val:", row, col, val)
ZZ[row][col] += val
'''
# Plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title(title);
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
#ax.plot_surface(XX, YY, ZZ)
surf = ax.plot_trisurf(X - X.mean(), Y - Y.mean(), Z - Z.mean(), cmap=cm.jet, linewidth=0.1)
fig.colorbar(surf)
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.zaxis.set_major_locator(MaxNLocator(6))
fig.tight_layout()
plt.show()
def plot_3d_scatter_surface(data, title=None, xlabel=None, ylabel=None, zlabel=None):
#plot_3d_scatter_trisurf(data, title, xlabel, ylabel, zlabel)
#plot_3d_scatter_fxy(data, title, xlabel, ylabel, zlabel)
plot_3d_scatter_kde(data, title, xlabel, ylabel, zlabel)
def plot_3d_scatter_fxy(data, title=None, xlabel=None, ylabel=None, zlabel=None):
cols = data.shape[1]
if cols < 2:
assert False, 'The number of columns should be 2'
X = data[:,0]
Y = data[:,1]
#Z = data[:,2]
#XX, YY = np.meshgrid(X, Y)
#ZZ = np.sinc((XX-20)/100*3.14) + np.sinc((YY-50)/100*3.14) #np.square(XX) + np.square(YY)
XY = np.vstack([X,Y])
Z = gaussian_kde(XY)(XY)
# Sort the points by density, so that the densest points are plotted last
idx = Z.argsort()
x, y, z = X[idx], Y[idx], Z[idx]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.scatter(x, y, z, c = z, cmap='jet')
#surf = ax.plot_trisurf(x - x.mean(), y - y.mean(), z, cmap=cm.jet, linewidth=0.1)
surf = ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.1)
ax.scatter(x,y,z, marker='.', s=10, c="black", alpha=0.5)
#ax.view_init(elev=60, azim=-45)
fig.colorbar(surf)
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.zaxis.set_major_locator(MaxNLocator(6))
fig.tight_layout()
ax.set_title(title);
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
plt.show()
return
def plot_3d_scatter_kde(data, title=None, xlabel=None, ylabel=None, zlabel=None):
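    # Fit a 2-D Gaussian KDE over the first two columns and draw it as a 3-D density surface with contour lines.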
cols = data.shape[1]
if cols < 2:
assert False, 'The number of columns should be 2'
X = data[:,0]
Y = data[:,1]
Xmin = int(np.floor(np.amin(X)))
Xmax = int(np.ceil(np.amax(X)))
Ymin = int(np.floor(np.amin(Y)))
Ymax = int(np.ceil(np.amax(Y)))
xmin = min(Xmin, Ymin)
ymin = min(Xmin, Ymin)
xmax = max(Xmax, Ymax)
ymax = max(Xmax, Ymax)
    # Perform the kernel density estimate
xx, yy = np.mgrid[Xmin:Xmax:100j, Ymin:Ymax:100j]
#xx, yy = np.meshgrid(X, Y)
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([X, Y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = plt.figure()
#ax = fig.gca()
ax = fig.add_subplot(111, projection='3d')
#ax.set_xlim(xmin, xmax)
#ax.set_ylim(ymin, ymax)
# Contourf plot
#cfset = ax.contourf(xx, yy, f, cmap='Blues')
ax.plot_surface(xx, yy, f - f.mean(), rstride=1, cstride=1, cmap='jet', edgecolor='none')
## Or kernel density estimate plot instead of the contourf plot
#ax.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
# Contour plot
cset = ax.contour(xx, yy, f, colors='k')
# Label plot
ax.clabel(cset, inline=1, fontsize=10)
ax.set_xlabel('PCA1')
ax.set_ylabel('PCA2')
ax.set_title(title)
plt.show()
def plot_2d_scatter(data, title=None, xlabel=None, ylabel=None, handles=None):
cols = data.shape[1]
if cols < 3:
assert False, 'The number of columns should be 3'
X = data[:,0]
Y = data[:,1]
Z = data[:,2]
ax = plt.axes()
scatter = ax.scatter(X, Y, c = ['green' if z > 0.5 else 'red' for z in Z], cmap='RdYlGn')
ax.set_title(title);
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.legend(handles=handles)
plt.show()
for i in range(9):
device_index = i
device_id = device_index + 1
device_data = get_nbaiot_device_data(device_id)
plot_surface_nbaiot_device(device_data, device_id)
plot_scatter_nbaiot_device(device_data, device_id, False) | code |
129033410/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset['isFraud'].value_counts() | code |
129033410/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.head() | code |
129033410/cell_33 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train) | code |
129033410/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape | code |
129033410/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
x = dataset.drop(columns=['isFraud'])
y = dataset['isFraud']
y.value_counts()
y.value_counts()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x = scaler.fit_transform(x)
log_reg = LogisticRegression()
# Assumed 70/30 split; the notebook's original train/test-split cell is not captured in this record.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
log_reg.fit(x_train, y_train)
y_pred = log_reg.predict(x_test)
cvs = cross_val_score(log_reg, x, y, cv=3)
print(cvs) | code |
129033410/cell_26 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
x = dataset.drop(columns=['isFraud'])
y = dataset['isFraud']
y.value_counts()
y.value_counts() | code |
129033410/cell_41 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
x = dataset.drop(columns=['isFraud'])
y = dataset['isFraud']
y.value_counts()
y.value_counts()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x = scaler.fit_transform(x)
log_reg = LogisticRegression()
# Assumed 70/30 split; the notebook's original train/test-split cell is not captured in this record.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
log_reg.fit(x_train, y_train)
y_pred = log_reg.predict(x_test)
cvs = cross_val_score(log_reg, x, y, cv=3)
cvs.mean() | code |
129033410/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.describe() | code |
129033410/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129033410/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.info() | code |
129033410/cell_45 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import chi2, SelectKBest
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
x = dataset.drop(columns=['isFraud'])
y = dataset['isFraud']
y.value_counts()
y.value_counts()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x = scaler.fit_transform(x)
best_feat = SelectKBest(chi2, k=8)
kbest = best_feat.fit_transform(x, y)
np.array(dataset.drop(columns=['isFraud']).columns)[best_feat.get_support()] | code |
129033410/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
plt.figure(figsize=(10, 5))
sns.heatmap(dataset.corr(), annot=True) | code |
129033410/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True) | code |
129033410/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum() | code |
129033410/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
dataset.describe() | code |
129033410/cell_38 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
y_pred = log_reg.predict(x_test)
print(classification_report(y_test, y_pred)) | code |
129033410/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
dataset.describe(include='object') | code |
129033410/cell_35 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
y_pred = log_reg.predict(x_test)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
confusion_matrix(y_test, y_pred) | code |
129033410/cell_24 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
x = dataset.drop(columns=['isFraud'])
y = dataset['isFraud']
y.value_counts() | code |
129033410/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
plt.pie(dataset['isFraud'].value_counts(), autopct='%.2f%%') | code |
129033410/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.isna().sum()
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.preprocessing import LabelEncoder
encoder = {}
for i in dataset.select_dtypes('object').columns:
encoder[i] = LabelEncoder()
dataset[i] = encoder[i].fit_transform(dataset[i])
dataset.head() | code |
129033410/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/financial-dataset-for-fraud-detection-in-a-comapny/Fraud.csv')
dataset.shape
dataset.nunique().sort_values(ascending=True)
target = 'isFraud'
features = [feature for feature in dataset.columns if feature not in [target]]
dataset.head() | code |
129033410/cell_12 | [
"text_plain_output_1.png"
] | import seaborn as sns | code |
129033410/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
y_pred = log_reg.predict(x_test)
accuracy_score(y_test, y_pred) | code |
104120688/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2) | code |
104120688/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df | code |
104120688/cell_29 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data.drop(['Age'], axis=1, inplace=True)
x = data.drop(['Survived'], axis=1)  # inplace=True would make x None and remove the target before y is read
y = data['Survived'] | code |
104120688/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data.drop(['Age'], axis=1, inplace=True)
data | code |
104120688/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2) | code |
104120688/cell_19 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
df | code |
104120688/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104120688/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col | code |
104120688/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
df['Sex'].unique() | code |
104120688/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum() | code |
104120688/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df | code |
18137853/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os, os.path
from xml.etree import ElementTree as ET
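# Parse one Pascal VOC-style annotation XML: one dict per <object> with the class name, pose,
# difficult/truncated flags, and the [xmin, ymin, xmax, ymax] bounding box.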
def parse_annotation(fname):
objects = []
for child in ET.parse(fname).findall('object'):
dog = {}
dog['name'] = child.find('name').text
dog['pose'] = child.find('pose').text
dog['difficult'] = int(child.find('difficult').text)
dog['truncated'] = int(child.find('truncated').text)
bbox = child.find('bndbox')
dog['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)]
objects.append(dog)
return objects
IMAGE_DIR = '../input/all-dogs/all-dogs'
dog_imgs = pd.DataFrame(os.listdir(IMAGE_DIR), columns=['filename'])
dog_imgs['basename'] = dog_imgs['filename'].str.split('.').apply(lambda x: x[0])
dog_imgs[['class', 'id']] = dog_imgs['basename'].str.split('_', expand=True)
dog_imgs = dog_imgs.set_index('basename').sort_index()
ANNOTATION_DIR = '../input/annotation/Annotation'
dog_breeds = pd.DataFrame(os.listdir(ANNOTATION_DIR), columns=['dirname'])
dog_breeds[['class', 'breedname']] = dog_breeds['dirname'].str.split('-', 1, expand=True)
dog_breeds = dog_breeds.set_index('class').sort_index()
dog_imgs['annotation_filename'] = dog_imgs.apply(lambda x: os.path.join(ANNOTATION_DIR, dog_breeds.loc[x['class']]['dirname'], x.name), axis=1)
dog_imgs['objects'] = dog_imgs['annotation_filename'].apply(parse_annotation)
doggo = dog_imgs.sample(1).iloc[0]
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import imgaug.augmenters as iaa
pil_im = Image.open(os.path.join(IMAGE_DIR, doggo['filename']))
im = np.asarray(pil_im)
fig, ax = plt.subplots(1)
ax.imshow(im)
h, w, c = im.shape
for dog in doggo['objects']:
xmin, ymin, xmax, ymax = dog['bbox']
print(h, w, ':', xmin, ymin, xmax, ymax)
bbox = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(bbox)
plt.show()
fig, ax = plt.subplots(1)
dog = doggo.objects[0]
h, w, c = im.shape
xmin, ymin, xmax, ymax = dog['bbox']
pil_crop = pil_im.crop((xmin, ymin, xmax, ymax)).resize((64, 64))
im2 = np.asarray(pil_crop)
ax.imshow(im2)
plt.show() | code |
18137853/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, os.path
print(os.listdir('../input')) | code |
18137853/cell_7 | [
"image_output_2.png",
"image_output_1.png"
] | from keras.optimizers import Adam
import tensorflow as tf
from keras.optimizers import Adam
from keras import backend as K
class AdamWithWeightnorm(Adam):
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= 1.0 / (1.0 + self.decay * K.cast(self.iterations, K.floatx()))
t = K.cast(self.iterations + 1, K.floatx())
lr_t = lr * K.sqrt(1.0 - K.pow(self.beta_2, t)) / (1.0 - K.pow(self.beta_1, t))
shapes = [K.get_variable_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
ps = K.get_variable_shape(p)
if len(ps) > 1:
V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
V_scaler_shape = K.get_variable_shape(V_scaler)
m_g = K.zeros(V_scaler_shape)
v_g = K.zeros(V_scaler_shape)
m_g_t = self.beta_1 * m_g + (1.0 - self.beta_1) * grad_g
v_g_t = self.beta_2 * v_g + (1.0 - self.beta_2) * K.square(grad_g)
new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) + self.epsilon)
self.updates.append(K.update(m_g, m_g_t))
self.updates.append(K.update(v_g, v_g_t))
m_t = self.beta_1 * m + (1.0 - self.beta_1) * grad_V
v_t = self.beta_2 * v + (1.0 - self.beta_2) * K.square(grad_V)
new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
if getattr(p, 'constraint', None) is not None:
new_V_param = p.constraint(new_V_param)
add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)
else:
m_t = self.beta_1 * m + (1.0 - self.beta_1) * g
v_t = self.beta_2 * v + (1.0 - self.beta_2) * K.square(g)
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
import tensorflow as tf
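# Weight normalization (Salimans & Kingma, 2016): each weight matrix W is reparameterized as
# W = g * V / ||V||; the helper below recovers V, the per-unit gains g, and their gradients
# from the fused parameter W and its gradient.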
def get_weightnorm_params_and_grads(p, g):
ps = K.get_variable_shape(p)
V_scaler_shape = (ps[-1],)
V_scaler = K.ones(V_scaler_shape)
norm_axes = [i for i in range(len(ps) - 1)]
V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])
V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
g_param = V_scaler * V_norm
grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)
return (V, V_norm, V_scaler, g_param, grad_g, grad_V)
def add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):
ps = K.get_variable_shape(new_V_param)
norm_axes = [i for i in range(len(ps) - 1)]
new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))
new_V_scaler = new_g_param / new_V_norm
new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param
updates.append(K.update(W, new_W))
updates.append(K.update(V_scaler, new_V_scaler))
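# Data-dependent initialization: push one batch through the model and rescale each layer's
# weights and biases so its pre-activations start with zero mean and unit variance.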
def data_based_init(model, input):
if type(input) is dict:
feed_dict = input
elif type(input) is list:
feed_dict = {tf_inp: np_inp for tf_inp, np_inp in zip(model.inputs, input)}
else:
feed_dict = {model.inputs[0]: input}
if model.uses_learning_phase and K.learning_phase() not in feed_dict:
feed_dict.update({K.learning_phase(): 1})
layer_output_weight_bias = []
for l in model.layers:
trainable_weights = l.trainable_weights
if len(trainable_weights) == 2:
W, b = trainable_weights
assert l.built
layer_output_weight_bias.append((l.name, l.get_output_at(0), W, b))
sess = K.get_session()
for l, o, W, b in layer_output_weight_bias:
print('Performing data dependent initialization for layer ' + l)
m, v = tf.nn.moments(o, [i for i in range(len(o.get_shape()) - 1)])
s = tf.sqrt(v + 1e-10)
updates = tf.group(W.assign(W / tf.reshape(s, [1] * (len(W.get_shape()) - 1) + [-1])), b.assign((b - m) / s))
sess.run(updates, feed_dict) | code |
18137853/cell_12 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | from PIL import Image
from PIL import Image
from keras.initializers import RandomNormal
from keras.models import Model, Sequential
from keras.optimizers import Adam
from tqdm import tqdm, tqdm_notebook
import matplotlib.patches as patches
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow as tf
import numpy as np
import pandas as pd
import os, os.path
from xml.etree import ElementTree as ET
def parse_annotation(fname):
objects = []
for child in ET.parse(fname).findall('object'):
dog = {}
dog['name'] = child.find('name').text
dog['pose'] = child.find('pose').text
dog['difficult'] = int(child.find('difficult').text)
dog['truncated'] = int(child.find('truncated').text)
bbox = child.find('bndbox')
dog['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)]
objects.append(dog)
return objects
IMAGE_DIR = '../input/all-dogs/all-dogs'
dog_imgs = pd.DataFrame(os.listdir(IMAGE_DIR), columns=['filename'])
dog_imgs['basename'] = dog_imgs['filename'].str.split('.').apply(lambda x: x[0])
dog_imgs[['class', 'id']] = dog_imgs['basename'].str.split('_', expand=True)
dog_imgs = dog_imgs.set_index('basename').sort_index()
ANNOTATION_DIR = '../input/annotation/Annotation'
dog_breeds = pd.DataFrame(os.listdir(ANNOTATION_DIR), columns=['dirname'])
dog_breeds[['class', 'breedname']] = dog_breeds['dirname'].str.split('-', 1, expand=True)
dog_breeds = dog_breeds.set_index('class').sort_index()
dog_imgs['annotation_filename'] = dog_imgs.apply(lambda x: os.path.join(ANNOTATION_DIR, dog_breeds.loc[x['class']]['dirname'], x.name), axis=1)
dog_imgs['objects'] = dog_imgs['annotation_filename'].apply(parse_annotation)
doggo = dog_imgs.sample(1).iloc[0]
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import imgaug.augmenters as iaa
pil_im = Image.open(os.path.join(IMAGE_DIR, doggo['filename']))
im = np.asarray(pil_im)
fig,ax = plt.subplots(1)
ax.imshow(im)
h,w,c = im.shape
for dog in doggo['objects']:
xmin, ymin, xmax, ymax = dog['bbox']
print(h,w,":",xmin,ymin,xmax,ymax)
bbox = patches.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(bbox)
plt.show()
fig,ax = plt.subplots(1)
dog = doggo.objects[0]
h,w,c = im.shape
xmin, ymin, xmax, ymax = dog['bbox']
#im = im[ymin:ymax,xmin:xmax]
pil_crop = pil_im.crop((xmin, ymin, xmax, ymax)).resize((64, 64))
im2 = np.asarray(pil_crop)
ax.imshow(im2)
plt.show()
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import imgaug.augmenters as iaa
from tqdm import tqdm, tqdm_notebook
def get_truth_images():
all_imgs = []
for _, doggo in tqdm_notebook(dog_imgs.iterrows(), total=len(dog_imgs)):
pil_im = Image.open(os.path.join(IMAGE_DIR, doggo['filename']))
        w, h = pil_im.size  # use the current image's size (the original read shape from a stale global array)
for dog in doggo['objects']:
border = 10
xmin, ymin, xmax, ymax = dog['bbox']
xmin = max(0, xmin - border)
ymin = max(0, ymin - border)
xmax = min(w, xmax + border)
ymax = min(h, ymax + border)
pil_crop = pil_im.crop((xmin, ymin, xmax, ymax)).resize((64, 64))
all_imgs.append(np.asarray(pil_crop))
return np.stack(all_imgs)
truth_imgs = get_truth_images()
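# Scale uint8 pixels from [0, 255] to [-1, 1] so real images match the generator's tanh range.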
truth_nrm_imgs = (truth_imgs - 127.5) / 127.5
from keras.optimizers import Adam
from keras import backend as K
class AdamWithWeightnorm(Adam):
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= 1.0 / (1.0 + self.decay * K.cast(self.iterations, K.floatx()))
t = K.cast(self.iterations + 1, K.floatx())
lr_t = lr * K.sqrt(1.0 - K.pow(self.beta_2, t)) / (1.0 - K.pow(self.beta_1, t))
shapes = [K.get_variable_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
ps = K.get_variable_shape(p)
if len(ps) > 1:
V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
V_scaler_shape = K.get_variable_shape(V_scaler)
m_g = K.zeros(V_scaler_shape)
v_g = K.zeros(V_scaler_shape)
m_g_t = self.beta_1 * m_g + (1.0 - self.beta_1) * grad_g
v_g_t = self.beta_2 * v_g + (1.0 - self.beta_2) * K.square(grad_g)
new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) + self.epsilon)
self.updates.append(K.update(m_g, m_g_t))
self.updates.append(K.update(v_g, v_g_t))
m_t = self.beta_1 * m + (1.0 - self.beta_1) * grad_V
v_t = self.beta_2 * v + (1.0 - self.beta_2) * K.square(grad_V)
new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
if getattr(p, 'constraint', None) is not None:
new_V_param = p.constraint(new_V_param)
add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)
else:
m_t = self.beta_1 * m + (1.0 - self.beta_1) * g
v_t = self.beta_2 * v + (1.0 - self.beta_2) * K.square(g)
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
import tensorflow as tf
def get_weightnorm_params_and_grads(p, g):
ps = K.get_variable_shape(p)
V_scaler_shape = (ps[-1],)
V_scaler = K.ones(V_scaler_shape)
norm_axes = [i for i in range(len(ps) - 1)]
V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])
V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
g_param = V_scaler * V_norm
grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)
return (V, V_norm, V_scaler, g_param, grad_g, grad_V)
def add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):
ps = K.get_variable_shape(new_V_param)
norm_axes = [i for i in range(len(ps) - 1)]
new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))
new_V_scaler = new_g_param / new_V_norm
new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param
updates.append(K.update(W, new_W))
updates.append(K.update(V_scaler, new_V_scaler))
def data_based_init(model, input):
if type(input) is dict:
feed_dict = input
elif type(input) is list:
feed_dict = {tf_inp: np_inp for tf_inp, np_inp in zip(model.inputs, input)}
else:
feed_dict = {model.inputs[0]: input}
if model.uses_learning_phase and K.learning_phase() not in feed_dict:
feed_dict.update({K.learning_phase(): 1})
layer_output_weight_bias = []
for l in model.layers:
trainable_weights = l.trainable_weights
if len(trainable_weights) == 2:
W, b = trainable_weights
assert l.built
layer_output_weight_bias.append((l.name, l.get_output_at(0), W, b))
sess = K.get_session()
for l, o, W, b in layer_output_weight_bias:
m, v = tf.nn.moments(o, [i for i in range(len(o.get_shape()) - 1)])
s = tf.sqrt(v + 1e-10)
updates = tf.group(W.assign(W / tf.reshape(s, [1] * (len(W.get_shape()) - 1) + [-1])), b.assign((b - m) / s))
sess.run(updates, feed_dict)
from keras.models import Model, Sequential
from keras.layers import Dense, Conv2D, Flatten, Concatenate, UpSampling2D, Dropout, LeakyReLU, ReLU, Reshape, Input, Conv2DTranspose
from keras.initializers import RandomNormal
from keras import backend as K
import tensorflow as tf
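# DCGAN-style critic: four strided 4x4 conv blocks (LeakyReLU + dropout) downsample 64x64 -> 4x4;
# the final Dense is linear because the relativistic losses below apply the sigmoid themselves.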
def make_discriminator_model(input_shape=(64, 64, 3)):
init = RandomNormal(mean=0.0, stddev=0.02)
model = Sequential()
model.add(Conv2D(32, kernel_size=4, strides=2, padding='same', kernel_initializer=init, input_shape=input_shape))
model.add(LeakyReLU(0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', kernel_initializer=init))
model.add(LeakyReLU(0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=4, strides=2, padding='same', kernel_initializer=init))
model.add(LeakyReLU(0.2))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=4, strides=2, padding='same', kernel_initializer=init))
model.add(LeakyReLU(0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='linear', kernel_initializer=init))
return model
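# Generator: project the latent vector to a 4x4x64 tensor, then four bilinear-upsample + conv + ReLU
# stages grow it to 64x64; tanh keeps outputs in [-1, 1].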
def make_generator_model(random_dim=128, start_shape=(4, 4, 64)):
init = RandomNormal(mean=0.0, stddev=0.02)
model = Sequential()
a, b, c = start_shape
start_dim = a * b * c
model.add(Dense(start_dim, kernel_initializer=init, input_dim=random_dim))
model.add(Reshape(start_shape))
model.add(UpSampling2D(interpolation='bilinear'))
model.add(Conv2D(512, kernel_size=4, padding='same', kernel_initializer=init))
model.add(ReLU())
model.add(UpSampling2D(interpolation='bilinear'))
model.add(Conv2D(256, kernel_size=4, padding='same', kernel_initializer=init))
model.add(ReLU())
model.add(UpSampling2D(interpolation='bilinear'))
model.add(Conv2D(128, kernel_size=4, padding='same', kernel_initializer=init))
model.add(ReLU())
model.add(UpSampling2D(interpolation='bilinear'))
model.add(Conv2D(64, kernel_size=4, padding='same', kernel_initializer=init))
model.add(ReLU())
model.add(Conv2D(3, kernel_size=3, activation='tanh', padding='same', kernel_initializer=init))
model.summary()
return model
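# Combined model for generator updates: the discriminator is frozen so only G's weights move
# when training against D's judgment of generated images.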
def make_gan_model(dis_model, gen_model, random_dim=128):
dis_model.trainable = False
gan_input = Input(shape=(random_dim,))
gen_output = gen_model(gan_input)
gan_output = dis_model(gen_output)
gan_model = Model(inputs=gan_input, outputs=gan_output)
return gan_model
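# Sample standard-normal latent vectors of shape (n_samples, random_dim).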
def gen_input(random_dim, n_samples):
noise = np.random.randn(random_dim * n_samples)
noise = noise.reshape((n_samples, random_dim))
return noise
def plot_gen_noise(gen_model, random_dim=128, examples=25, dim=(5, 5)):
    # Reconstructed display loop: the subplot/imshow calls were missing from this record.
    gen_imgs = gen_model.predict(gen_input(random_dim, examples))
    gen_imgs = ((gen_imgs + 1) * 127.5).astype('uint8')
    for i, img in enumerate(gen_imgs):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(img)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
RANDOM_DIM = 128
RAW_BATCH_SIZE = 32
DIS_TRAIN_RATIO = 2
MINI_BATCH_SIZE = RAW_BATCH_SIZE // DIS_TRAIN_RATIO
dis_model = make_discriminator_model()
gen_model = make_generator_model()
batch_count = truth_nrm_imgs.shape[0] // RAW_BATCH_SIZE
adam_nrm_op = AdamWithWeightnorm(lr=0.0002, beta_1=0.5, beta_2=0.999)
real_inp = Input(shape=truth_nrm_imgs.shape[1:])
nois_inp = Input(shape=(RANDOM_DIM,))
fake_inp = gen_model(nois_inp)
disc_r = dis_model(real_inp)
disc_f = dis_model(fake_inp)
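# Relativistic average GAN losses (Jolicoeur-Martineau, 2018): each critic score is judged
# relative to the mean score of the opposite class. The unused _y_real/_y_pred arguments exist
# only to satisfy Keras; disc_r and disc_f are captured from the enclosing scope.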
def rel_dis_loss(_y_real, _y_pred):
epsilon = K.epsilon()
return -(K.mean(K.log(K.sigmoid(disc_r - K.mean(disc_f, axis=0)) + epsilon), axis=0) + K.mean(K.log(1 - K.sigmoid(disc_f - K.mean(disc_r, axis=0)) + epsilon), axis=0))
def rel_gen_loss(_y_real, _y_pred):
epsilon = K.epsilon()
return -(K.mean(K.log(K.sigmoid(disc_f - K.mean(disc_r, axis=0)) + epsilon), axis=0) + K.mean(K.log(1 - K.sigmoid(disc_r - K.mean(disc_f, axis=0)) + epsilon), axis=0))
def rals_dis_loss(_y_real, _y_pred):
return K.mean(K.pow(disc_r - K.mean(disc_f, axis=0) - 1, 2) + K.pow(disc_f - K.mean(disc_r, axis=0) + 1, 2))
def rals_gen_loss(_y_real, _y_pred):
return K.mean(K.pow(disc_r - K.mean(disc_f, axis=0) + 1, 2) + K.pow(disc_f - K.mean(disc_r, axis=0) - 1, 2))
gen_train = Model([nois_inp, real_inp], [disc_r, disc_f])
dis_model.trainable = False
gen_train.compile(adam_nrm_op, loss=[rals_gen_loss, None])
gen_train.summary()
dis_train = Model([nois_inp, real_inp], [disc_r, disc_f])
gen_model.trainable = False
dis_model.trainable = True
dis_train.compile(adam_nrm_op, loss=[rals_dis_loss, None])
dis_train.summary()
gen_loss = []
dis_loss = []
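# The custom relativistic losses ignore y_true, but Keras still requires target arrays of the right shape.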
dummy_y = np.zeros((RAW_BATCH_SIZE, 1), dtype=np.float32)
dummy_mini_y = np.zeros((MINI_BATCH_SIZE, 1), dtype=np.float32) | code |