path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, up to 40.4k chars) | cell_type (single class: "code") |
---|---|---|---|
73075873/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
plt.figure(figsize=(8, 8))
plt.pie([Flag0, Flag1], labels=['Non-Risk:\n%d total' % Flag0, 'Risk:\n%d total' % Flag1], autopct='%1.2f%%') | code |
73075873/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
data['Age_group'] = pd.qcut(data.Age, 5)
g = sns.FacetGrid(data=data, row='House_Ownership', col='Married/Single', height=5, aspect=1.5)
g.map_dataframe(sns.barplot, x='Age_group', y='Risk_Flag', ci=None)
g.set_xticklabels(rotation=60) | code |
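The FacetGrid above draws the mean `Risk_Flag` per age quintile for each `House_Ownership` × `Married/Single` panel. The same numbers can be read off directly in pandas; a minimal sketch, assuming the `data` frame with the `Age_group` column created just above:

```python
# Mean risk rate per age quintile within each ownership/marital segment;
# these are the bar heights the FacetGrid renders.
risk_by_group = (
    data.groupby(['House_Ownership', 'Married/Single', 'Age_group'])['Risk_Flag']
    .mean()
    .reset_index()
)
print(risk_by_group.head(10))
```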
73075873/cell_32 | [
"text_plain_output_1.png"
] | from imblearn.combine import SMOTETomek
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
ada = ADASYN(random_state=42)
X_ada, y_ada = ada.fit_resample(X_train, y_train)
print('Initial size:', X_train.shape)
smt = SMOTETomek(tomek=TomekLinks(sampling_strategy='majority'))
X_smt, y_smt = smt.fit_resample(X_train, y_train)
print('Resampled size:', X_smt.shape) | code |
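A quick cross-check of what the resamplers actually did is to compare class counts before and after. A minimal sketch, assuming `y_train` from the earlier (elided) split plus the `y_ada` and `y_smt` produced above:

```python
from collections import Counter

# ADASYN only oversamples the minority class; SMOTETomek also removes
# majority-side Tomek links, so the two balanced sets differ slightly in size.
print('Original  :', Counter(y_train))
print('ADASYN    :', Counter(y_ada))
print('SMOTETomek:', Counter(y_smt))
```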
73075873/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
sns.displot(x='Age', data=data, height=8, aspect=1.5, hue='Risk_Flag', bins=20) | code |
73075873/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
data.info() | code |
73075873/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
sns.catplot(x='Experience', y='Income', data=data, kind='violin', height=8, aspect=1.6, palette='deep') | code |
73075873/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60) | code |
73075873/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, plot_roc_curve, plot_confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
brf = BalancedRandomForestClassifier().fit(X_train, y_train)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
fig.suptitle('Balanced Random Forest evaluation')
ax1.set_title('Confusion matrix (Balanced RF)')
ax2.set_title('ROC curve (Balanced RF)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(brf, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(brf, X_test, y_test, ax=ax2)
y_pred = brf.predict(X_test)
acc_brf = accuracy_score(y_test, y_pred)
f1_brf = f1_score(y_test, y_pred)
roc_brf = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_brf) | code |
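One caveat on the metrics above: `roc_auc_score(y_test, y_pred)` is computed from hard 0/1 predictions, which collapses the ROC curve to a single operating point. A sketch of the usual probability-based variant, assuming the fitted `brf` from this cell:

```python
# Score with the positive-class probability instead of the thresholded label.
y_score = brf.predict_proba(X_test)[:, 1]
print('Roc_Auc (from probabilities): %.3f' % roc_auc_score(y_test, y_score))
```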
73075873/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.figure(figsize=(20, 12))
plt.xticks(rotation=60)
sns.barplot(x='STATE', y='Risk_Flag', data=data, palette='deep') | code |
73075873/cell_36 | [
"text_plain_output_1.png"
] | from imblearn.combine import SMOTETomek
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, plot_roc_curve, plot_confusion_matrix
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
brf = BalancedRandomForestClassifier().fit(X_train, y_train)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
fig.suptitle('Balanced Random Forest evaluation')
ax1.set_title('Confusion matrix (Balanced RF)')
ax2.set_title('ROC curve (Balanced RF)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(brf, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(brf, X_test, y_test, ax=ax2)
y_pred = brf.predict(X_test)
acc_brf = accuracy_score(y_test, y_pred)
f1_brf = f1_score(y_test, y_pred)
roc_brf = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_brf)
ada = ADASYN(random_state=42)
X_ada, y_ada = ada.fit_resample(X_train, y_train)
rf_ada = RandomForestClassifier().fit(X_ada, y_ada)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (RF and ADASYN)')
ax2.set_title('ROC curve (RF and ADASYN)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(rf_ada, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(rf_ada, X_test, y_test, ax=ax2)
y_pred = rf_ada.predict(X_test)
acc_ada = accuracy_score(y_test, y_pred)
f1_ada = f1_score(y_test, y_pred)
roc_ada = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_ada)
smt = SMOTETomek(tomek=TomekLinks(sampling_strategy='majority'))
X_smt, y_smt = smt.fit_resample(X_train, y_train)
rf_smt = RandomForestClassifier().fit(X_smt, y_smt)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (RF and SMOTETomek)')
ax2.set_title('ROC curve (RF and SMOTETomek)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(rf_smt, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(rf_smt, X_test, y_test, ax=ax2)
y_pred = rf_smt.predict(X_test)
acc_smt = accuracy_score(y_test, y_pred)
f1_smt = f1_score(y_test, y_pred)
roc_smt = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_smt)
y_prob = rf_smt.predict_proba(X_test)
threshold = list(np.linspace(0.5, 0.95, 10))
roc = []
acc = []
for t in threshold:
y_t = [0 if x[0] > t else 1 for x in y_prob]
roc.append(roc_auc_score(y_test, y_t))
acc.append(accuracy_score(y_test, y_t))
plt.figure(figsize=(12, 8))
plt.title('ROC AUC and Accuracy vs. Threshold')
plt.plot(threshold, roc, label='ROC AUC Score')
plt.plot(threshold, acc, label='Accuracy Score')
plt.xlabel('Probability threshold for non-risk class')
plt.ylabel('Score')
plt.legend(loc='lower left') | code |
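To turn the sweep above into a concrete operating point, one option is simply the threshold that maximizes ROC AUC; a minimal sketch using the `threshold`, `roc`, and `acc` lists built in this cell:

```python
# Pick the threshold with the highest ROC AUC in the sweep.
best_i = int(np.argmax(roc))
print('Best threshold: %.2f (ROC AUC %.3f, accuracy %.3f)'
      % (threshold[best_i], roc[best_i], acc[best_i]))
```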
105186160/cell_13 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed imports; likely provided by fastai's star import in an elided cell
from PIL import Image
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
im = Image.open(working_path / 'train' / '0' / '3002.png')
im | code |
105186160/cell_9 | [
"image_output_1.png"
] | from pathlib import Path  # assumed import; likely provided by fastai's star import in an elided cell
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
input_path = Path('/kaggle/input')
train_image_paths = sorted(input_path.rglob('train/*.png'))
test_image_paths = sorted(input_path.rglob('test/*.png'))
train_image_paths | code |
105186160/cell_2 | [
"image_output_1.png"
] | !pip install -Uqq fastai | code |
105186160/cell_11 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed import; likely provided by fastai's star import in an elided cell
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
input_path = Path('/kaggle/input')
train_image_paths = sorted(input_path.rglob('train/*.png'))
test_image_paths = sorted(input_path.rglob('test/*.png'))
try:
for image_path in train_image_paths:
if '_1' in image_path.stem:
with (working_path / 'train' / '1' / image_path.name).open(mode='xb') as f:
f.write(image_path.read_bytes())
else:
with (working_path / 'train' / '0' / image_path.name).open(mode='xb') as f:
f.write(image_path.read_bytes())
except FileExistsError:
print('Training images have already been moved.')
else:
print('Training images moved.') | code |
105186160/cell_1 | [
"text_plain_output_1.png"
] | import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105186160/cell_7 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed import; likely provided by fastai's star import in an elided cell
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
working_path / folders[0] / labels[0] | code |
105186160/cell_18 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed imports; likely provided by fastai's star import in an elided cell
from PIL import Image
from fastai.vision.all import get_image_files
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
im = Image.open(working_path / 'train' / '0' / '3002.png')
im
training_images = get_image_files(working_path / 'train')
training_images
image = Image.open(training_images[1])
image
testing_images = get_image_files(working_path / 'test')
len(testing_images)
Image.open(testing_images[48]) | code |
105186160/cell_15 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed imports; likely provided by fastai's star import in an elided cell
from fastai.vision.all import get_image_files
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
training_images = get_image_files(working_path / 'train')
training_images | code |
105186160/cell_16 | [
"image_output_1.png"
] | from pathlib import Path  # assumed imports; likely provided by fastai's star import in an elided cell
from PIL import Image
from fastai.vision.all import get_image_files
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
im = Image.open(working_path / 'train' / '0' / '3002.png')
im
training_images = get_image_files(working_path / 'train')
training_images
image = Image.open(training_images[1])
image | code |
105186160/cell_17 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed imports; likely provided by fastai's star import in an elided cell
from fastai.vision.all import get_image_files
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
testing_images = get_image_files(working_path / 'test')
len(testing_images) | code |
105186160/cell_14 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed import; likely provided by fastai's star import in an elided cell
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
input_path = Path('/kaggle/input')
train_image_paths = sorted(input_path.rglob('train/*.png'))
test_image_paths = sorted(input_path.rglob('test/*.png'))
# The original flattening nested the test-image loop inside the training loop's
# exception handler; two sequential try blocks express the intent directly.
try:
    for image_path in train_image_paths:
        if '_1' in image_path.stem:
            with (working_path / 'train' / '1' / image_path.name).open(mode='xb') as f:
                f.write(image_path.read_bytes())
        else:
            with (working_path / 'train' / '0' / image_path.name).open(mode='xb') as f:
                f.write(image_path.read_bytes())
except FileExistsError:
    pass  # training images were already copied in an earlier cell
try:
    for image_path in test_image_paths:
        if '_1' in image_path.stem:
            with (working_path / 'test' / '1' / image_path.name).open(mode='xb') as f:
                f.write(image_path.read_bytes())
        else:
            with (working_path / 'test' / '0' / image_path.name).open(mode='xb') as f:
                f.write(image_path.read_bytes())
except FileExistsError:
    print('Testing images have already been moved.')
else:
    print('Testing images moved.') | code |
105186160/cell_12 | [
"text_plain_output_1.png"
] | from pathlib import Path  # assumed import; likely provided by fastai's star import in an elided cell
working_path = Path.cwd()
folders = ('train', 'test')
labels = ('0', '1')
(working_path / 'train' / '0' / '3002.png').exists() | code |
74041457/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
(Xtrain.shape, test.shape) | code |
74041457/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
Xtrain.head() | code |
74041457/cell_11 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
(Xtrain.shape, test.shape)
y = Xtrain.claim
Xtrain = Xtrain.drop(['id', 'claim'], axis=1)
test_id = test.id
test = test.drop('id', axis=1)
ss = StandardScaler()
ss.fit(Xtrain)
Xtrain = ss.transform(Xtrain)
test = ss.transform(test)
pca = PCA(0.95)
pca.fit(Xtrain)
Xtrain = pca.transform(Xtrain)
test = pca.transform(test)
(Xtrain.shape, test.shape) | code |
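`PCA(0.95)` keeps however many components are needed to explain 95% of the variance. A quick sketch to inspect how that budget is spent, assuming the fitted `pca` from the cell above:

```python
import numpy as np

# How many components survived, and how the explained variance accumulates.
print('Components kept:', pca.n_components_)
print('First five ratios:', pca.explained_variance_ratio_[:5])
print('Cumulative total: %.4f' % np.cumsum(pca.explained_variance_ratio_)[-1])
```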
74041457/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import optuna
from optuna.samplers import TPESampler
import catboost
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, roc_auc_score
from functools import partial
optuna.logging.set_verbosity(optuna.logging.WARNING)
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
74041457/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv') | code |
74041457/cell_17 | [
"text_html_output_1.png"
] | from functools import partial
from optuna.samplers import TPESampler
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
import optuna
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import catboost
optuna.logging.set_verbosity(optuna.logging.WARNING)
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
(Xtrain.shape, test.shape)
y = Xtrain.claim
Xtrain = Xtrain.drop(['id', 'claim'], axis=1)
test_id = test.id
test = test.drop('id', axis=1)
ss = StandardScaler()
ss.fit(Xtrain)
Xtrain = ss.transform(Xtrain)
test = ss.transform(test)
pca = PCA(0.95)
pca.fit(Xtrain)
Xtrain = pca.transform(Xtrain)
test = pca.transform(test)
(Xtrain.shape, test.shape)
def getXgbHyperparameters(trial):
xgb_param = {'tree_method': 'gpu_hist', 'eval_metric': 'auc', 'n_estimators': trial.suggest_int('n_estimators', 700, 2000, 100), 'booster': 'gbtree', 'reg_lambda': trial.suggest_int('reg_lambda', 1, 100), 'reg_alpha': trial.suggest_int('reg_alpha', 1, 100), 'subsample': trial.suggest_float('subsample', 0.2, 1.0), 'colsample_bytree': trial.suggest_float('colsample_bytree', 0.2, 1.0), 'max_depth': trial.suggest_int('max_depth', 3, 15), 'min_child_weight': trial.suggest_int('min_child_weight', 2, 18), 'learning_rate': trial.suggest_loguniform('learning_rate', 1e-05, 0.01), 'gamma': trial.suggest_float('gamma', 0, 20)}
return xgb_param
def optimize(trial, X, y):
params = getXgbHyperparameters(trial)
xgb = XGBClassifier(**params, use_label_encoder=False)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=44)
xgb.fit(X_train, y_train)
pred = xgb.predict(X_test)
    return -1 * roc_auc_score(y_test, pred)  # roc_auc_score expects (y_true, y_score)
opt_func = partial(optimize, X=Xtrain, y=y)  # X_train/y_train do not exist at this scope; pass the full training set
func = lambda trial: optimize(trial, Xtrain, y)
def logging_callback(study, frozen_trial):
    previous_best_value = study.user_attrs.get('previous_best_value', None)
    if previous_best_value != study.best_value:
        study.set_user_attr('previous_best_value', study.best_value)
        print('Trial %d: new best value %.5f' % (frozen_trial.number, study.best_value))
study = optuna.create_study(sampler=TPESampler(seed=69), direction='minimize', study_name='xgb')
study.optimize(func, timeout=1 * 60 * 60, callbacks=[logging_callback]) | code |
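Once the one-hour study finishes, the usual next step is to read off the best trial and refit on the full training matrix; a minimal sketch, assuming the `study`, `Xtrain`, `y`, and `test` objects from the cells above (the refit itself is not shown in the source):

```python
print('Best value:', study.best_value)
print('Best params:', study.best_params)

# Refit with the winning hyperparameters plus the fixed settings
# from getXgbHyperparameters (those are not part of best_params).
best_xgb = XGBClassifier(**study.best_params, tree_method='gpu_hist',
                         eval_metric='auc', use_label_encoder=False)
best_xgb.fit(Xtrain, y)
test_pred = best_xgb.predict_proba(test)[:, 1]
```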
74041457/cell_5 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
Xtrain = pd.read_csv('../input/sept-2021-filled/train_new.csv')
test = pd.read_csv('../input/sept-2021-filled/test_new.csv')
(Xtrain.shape, test.shape)
test.head() | code |
48163599/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_dir = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(data_dir + 'train.json', lines=True)
test = pd.read_json(data_dir + 'test.json', lines=True)
sample_df = pd.read_csv(data_dir + 'sample_submission.csv')
test.head(10) | code |
48163599/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_dir = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(data_dir + 'train.json', lines=True)
test = pd.read_json(data_dir + 'test.json', lines=True)
sample_df = pd.read_csv(data_dir + 'sample_submission.csv')
test.shape | code |
48163599/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
48163599/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_dir = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(data_dir + 'train.json', lines=True)
test = pd.read_json(data_dir + 'test.json', lines=True)
sample_df = pd.read_csv(data_dir + 'sample_submission.csv')
test.shape
# np.tostring does not exist; the sequence entry is already a printable string
print(test['sequence'][1]) | code |
48163599/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_dir = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(data_dir + 'train.json', lines=True)
test = pd.read_json(data_dir + 'test.json', lines=True)
sample_df = pd.read_csv(data_dir + 'sample_submission.csv')
len(test['sequence'][0]) | code |
48163599/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_dir = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(data_dir + 'train.json', lines=True)
test = pd.read_json(data_dir + 'test.json', lines=True)
sample_df = pd.read_csv(data_dir + 'sample_submission.csv')
train.shape | code |
122264608/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
model.make_inspector().variable_importances()
model.make_inspector().evaluation()
model.make_inspector().training_logs()[295:] | code |
122264608/cell_9 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
print()
for name, value in evaluation.items():
print(f'{name}: {value:.4f}') | code |
122264608/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
print(f'Label classes: {classes}')
df[label] = df[label].map(classes.index) | code |
122264608/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
df = df.iloc[:, 1:]
df.head() | code |
122264608/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
model.make_inspector().variable_importances() | code |
122264608/cell_1 | [
"text_plain_output_1.png"
] | !pip install tensorflow_decision_forests wurlitzer | code |
122264608/cell_7 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
print(f'{len(train_ds_pd)} examples in training, {len(test_ds_pd)} examples for testing.')
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label) | code |
122264608/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
model.make_inspector().variable_importances()
model.make_inspector().evaluation()
model.make_inspector().training_logs()[295:]
logs = model.make_inspector().training_logs()
sub_df = pd.read_csv('/kaggle/input/playground-series-s3e10/test.csv')
sub_df = sub_df.iloc[:, 1:]
new_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(sub_df)
y_pred = model.predict(new_dataset, verbose=0)
submission = pd.read_csv('/kaggle/input/playground-series-s3e10/sample_submission.csv')
submission['Class'] = y_pred
submission.head() | code |
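The only step the cell above leaves out is writing the file Kaggle expects; a one-line sketch using the `submission` frame just shown:

```python
# Save in the sample-submission format (no index column).
submission.to_csv('submission.csv', index=False)
```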
122264608/cell_8 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds) | code |
122264608/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
df.head() | code |
122264608/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
sub_df = pd.read_csv('/kaggle/input/playground-series-s3e10/test.csv')
sub_df = sub_df.iloc[:, 1:]
new_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(sub_df)
submission = pd.read_csv('/kaggle/input/playground-series-s3e10/sample_submission.csv')
submission.head() | code |
122264608/cell_14 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
model.make_inspector().variable_importances()
model.make_inspector().evaluation()
model.make_inspector().training_logs()[295:]
logs = model.make_inspector().training_logs()
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel('Number of trees')
plt.ylabel('Accuracy')
plt.subplot(1, 2, 2)
plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs])
plt.xlabel('Number of trees')
plt.ylabel('Logloss')
plt.show() | code |
122264608/cell_10 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
tfdf.model_plotter.plot_model_in_colab(model, tree_idx=0, max_depth=3) | code |
122264608/cell_12 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow_decision_forests as tfdf
df = pd.read_csv('/kaggle/input/playground-series-s3e10/train.csv')
label = 'Class'
classes = df[label].unique().tolist()
df[label] = df[label].map(classes.index)
df = df.iloc[:, 1:]
def split_dataset(dataset, test_ratio=0.15):
test_indices = np.random.rand(len(dataset)) < test_ratio
return (dataset[~test_indices], dataset[test_indices])
train_ds_pd, test_ds_pd = split_dataset(df)
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
model = tfdf.keras.GradientBoostedTreesModel()
model.fit(train_ds)
model.compile(metrics=['accuracy', 'AUC', 'Precision', 'Recall', 'binary_crossentropy'])
evaluation = model.evaluate(test_ds, return_dict=True)
model.make_inspector().variable_importances()
model.make_inspector().evaluation() | code |
2011423/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
data_df = pd.read_csv('../input/mushrooms.csv')
data_df.info() | code |
2011423/cell_3 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
np.set_printoptions(suppress=True, linewidth=300)
pd.options.display.float_format = lambda x: '%0.6f' % x
print(check_output(['ls', '../input']).decode('utf-8')) | code |
2011423/cell_5 | [
"image_output_11.png",
"image_output_17.png",
"image_output_14.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_21.png",
"image_output_7.png",
"image_output_20.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
columns = [c for c in data_df.columns if c not in ('class', 'y')]
single_val_c = {}
for i, c in enumerate(columns):
if data_df[c].nunique() == 1:
single_val_c[c] = data_df[c].unique()[0]
continue
s = data_df.groupby(c)['y'].mean()
sns.barplot(x=s.index, y=s)
plt.show()
for c in single_val_c.keys():
print('The column %s only has one unique value with %r' % (c, single_val_c[c])) | code |
32066544/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | (x_train.shape, y_train.shape) | code |
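`x_train` and `y_train` are not defined in any cell shown for this notebook; they presumably come from an elided split of the `X`/`Y` built in the cells below. A plausible reconstruction (the variable names and the 0.2 test fraction are assumptions, not from the source):

```python
from sklearn.model_selection import train_test_split

# Hypothetical split of the normalized features X and labels Y defined in cell_11.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
```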
32066544/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df = fashion_mnist_df.sample(frac=0.3).reset_index(drop=True)
LOOKUP = {0: 'T-shirt', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
def display_image(features, actual_label):
    # minimal implementation of the original empty stub: render the flat
    # 784-pixel row as a 28x28 image with its class name as the title
    plt.imshow(features.reshape(28, 28), cmap='gray')
    plt.title('Actual label: %s' % LOOKUP[actual_label])
    plt.show()
X = fashion_mnist_df[fashion_mnist_df.columns[1:]]
Y = fashion_mnist_df['label']
display_image(X.loc[15].values, Y.loc[15]) | code |
32066544/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df.head(10) | code |
32066544/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df = fashion_mnist_df.sample(frac=0.3).reset_index(drop=True)
LOOKUP = {0: 'T-shirt', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
def display_image(features, actual_label):
    # minimal implementation of the original empty stub: render the flat
    # 784-pixel row as a 28x28 image with its class name as the title
    plt.imshow(features.reshape(28, 28), cmap='gray')
    plt.title('Actual label: %s' % LOOKUP[actual_label])
    plt.show()
X = fashion_mnist_df[fashion_mnist_df.columns[1:]]
Y = fashion_mnist_df['label']
X = X / 255
X.head() | code |
32066544/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32066544/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df = fashion_mnist_df.sample(frac=0.3).reset_index(drop=True)
LOOKUP = {0: 'T-shirt', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
def display_image(features, actual_label):
    # minimal implementation of the original empty stub: render the flat
    # 784-pixel row as a 28x28 image with its class name as the title
    plt.imshow(features.reshape(28, 28), cmap='gray')
    plt.title('Actual label: %s' % LOOKUP[actual_label])
    plt.show()
X = fashion_mnist_df[fashion_mnist_df.columns[1:]]
Y = fashion_mnist_df['label']
display_image(X.loc[5].values, Y.loc[5]) | code |
32066544/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df['label'].unique() | code |
32066544/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
fashion_mnist_df = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
fashion_mnist_df = fashion_mnist_df.sample(frac=0.3).reset_index(drop=True)
LOOKUP = {0: 'T-shirt', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
def display_image(features, actual_label):
    # minimal implementation of the original empty stub: render the flat
    # 784-pixel row as a 28x28 image with its class name as the title
    plt.imshow(features.reshape(28, 28), cmap='gray')
    plt.title('Actual label: %s' % LOOKUP[actual_label])
    plt.show()
X = fashion_mnist_df[fashion_mnist_df.columns[1:]]
Y = fashion_mnist_df['label']
display_image(X.loc[500].values, Y.loc[500]) | code |
2032622/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
def intialize_parameters_deep(layer_dims):
np.random.seed(4)
L = len(layer_dims)
parameters = {}
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
return parameters
parameters = intialize_parameters_deep([7, 4, 3])
def linear_forward(A, W):
Z = np.dot(W, A.T)
cache = (A, W)
return (Z, cache)
Z, cache = linear_forward(X_train, parameters['W1'])
def sigmoid(Z):
A = 1 / (1 + np.exp(-Z))
cache = Z
return (A, cache)
def relu(Z):
A = np.maximum(0, Z)
cache = Z
return (A, cache)
A, cache = sigmoid(Z)
print(A.shape, cache.shape)
A, cache = relu(Z)
print(A.shape, cache.shape) | code |
2032622/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape) | code |
2032622/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
intialize_parameters(5, 4, 3) | code |
2032622/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
def intialize_parameters_deep(layer_dims):
np.random.seed(4)
L = len(layer_dims)
parameters = {}
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
return parameters
parameters = intialize_parameters_deep([7, 4, 3])
print(parameters) | code |
2032622/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
def intialize_parameters_deep(layer_dims):
np.random.seed(4)
L = len(layer_dims)
parameters = {}
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
return parameters
parameters = intialize_parameters_deep([7, 4, 3])
def linear_forward(A, W):
Z = np.dot(W, A.T)
cache = (A, W)
return (Z, cache)
Z, cache = linear_forward(X_train, parameters['W1'])
def sigmoid(Z):
A = 1 / (1 + np.exp(-Z))
cache = Z
return (A, cache)
def relu(Z):
A = np.maximum(0, Z)
cache = Z
return (A, cache)
A, cache = sigmoid(Z)
A, cache = relu(Z)
def linear_activation_forward(A_prev, W, activation):
if activation == 'sigmoid':
Z, linear_cache = linear_forward(A_prev, W)
A, activation_cache = sigmoid(Z)
if activation == 'relu':
Z, linear_cache = linear_forward(A_prev, W)
A, activation_cache = relu(Z)
cache = (linear_cache, activation_cache)
return (A, cache)
A, cache = linear_activation_forward(X_train, parameters['W1'], 'sigmoid')
print(A.shape)
A, cache = linear_activation_forward(X_train, parameters['W1'], 'relu')
print(A.shape) | code |
2032622/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
print(DataX.shape)
DataY = np.transpose([dataset[:, 7]])
print(DataY.shape) | code |
2032622/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
def intialize_parameters_deep(layer_dims):
np.random.seed(4)
L = len(layer_dims)
parameters = {}
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
return parameters
parameters = intialize_parameters_deep([7, 4, 3])
def linear_forward(A, W):
Z = np.dot(W, A.T)
cache = (A, W)
return (Z, cache)
Z, cache = linear_forward(X_train, parameters['W1'])
def sigmoid(Z):
A = 1 / (1 + np.exp(-Z))
cache = Z
return (A, cache)
def relu(Z):
A = np.maximum(0, Z)
cache = Z
return (A, cache)
A, cache = sigmoid(Z)
A, cache = relu(Z)
def linear_activation_forward(A_prev, W, activation):
if activation == 'sigmoid':
Z, linear_cache = linear_forward(A_prev, W)
A, activation_cache = sigmoid(Z)
if activation == 'relu':
Z, linear_cache = linear_forward(A_prev, W)
A, activation_cache = relu(Z)
cache = (linear_cache, activation_cache)
return (A, cache)
A, cache = linear_activation_forward(X_train, parameters['W1'], 'sigmoid')
A, cache = linear_activation_forward(X_train, parameters['W1'], 'relu')
def L_Model_forward(X, parameters):
A = X
caches = []
L = len(parameters)
for l in range(1, L):
A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], 'relu')  # str(l), not str(1): use layer l's weights
caches.append(cache)
A = A.T
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], 'sigmoid')
caches.append(cache)
    return (AL, caches)  # return the sigmoid output, not the hidden-layer activation
AL, cache = L_Model_forward(X_train, parameters)
print(AL.shape) | code |
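With `AL` coming out of a sigmoid, the natural next step for this hand-rolled network is a binary cross-entropy cost. The notebook's own cost function is not shown, so the following is only a sketch consistent with the shapes above (note the `[7, 4, 3]` layout gives three output units, so the sum runs over all of them):

```python
def compute_cost(AL, Y):
    """Binary cross-entropy averaged over the m training examples.

    AL: (n_out, m) sigmoid activations; Y: (m, 1) labels.
    """
    m = Y.shape[0]
    AL = np.clip(AL, 1e-10, 1 - 1e-10)  # guard against log(0)
    cost = -np.sum(Y.T * np.log(AL) + (1 - Y.T) * np.log(1 - AL)) / m
    return float(np.squeeze(cost))

print(compute_cost(AL, Y_train))
```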
2032622/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas
dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
def intialize_parameters(n_x, n_h, n_y):
np.random.seed(4)
W1 = np.random.randn(n_h, n_x)
W2 = np.random.randn(n_y, n_h)
parameters = {'W1': W1, 'W2': W2}
return parameters
def intialize_parameters_deep(layer_dims):
np.random.seed(4)
L = len(layer_dims)
parameters = {}
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
return parameters
parameters = intialize_parameters_deep([7, 4, 3])
def linear_forward(A, W):
Z = np.dot(W, A.T)
cache = (A, W)
return (Z, cache)
Z, cache = linear_forward(X_train, parameters['W1'])
print(Z.shape) | code |
106212034/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
movies = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv')
credit = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_credits.csv')
credit.head(3) | code |
106212034/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import ast
from sklearn.feature_extraction.text import CountVectorizer
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106212034/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
movies = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv')
credit = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_credits.csv')
movies = movies[['id', 'title', 'overview', 'tagline', 'genres', 'keywords']]
movies = movies.merge(credit, on='title')
movies.drop(['movie_id'], axis=1, inplace=True)
movies.dropna(inplace=True)
movies.isnull().sum()
movies | code |
106212034/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
movies = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv')
credit = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_credits.csv')
movies.head(3) | code |
106212034/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import ast
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
movies = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv')
credit = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_credits.csv')
movies = movies[['id', 'title', 'overview', 'tagline', 'genres', 'keywords']]
movies = movies.merge(credit, on='title')
movies.drop(['movie_id'], axis=1, inplace=True)
def convert(text):
L = []
for i in ast.literal_eval(text):
L.append(i['name'])
return L
def convert2(text):
a = []
count = 0
for i in ast.literal_eval(text):
if count < 3:
a.append(i['name'])
count += 1
return a
def convert3(data):
a = []
for i in ast.literal_eval(data):
if i['job'] == 'Director':
a.append(i['name'])
return a
movies.dropna(inplace=True)
movies.isnull().sum()
def remove_space(data):
a = []
for i in data:
a.append(i.replace(' ', ''))
return a
movies['overview'] = movies['overview'].apply(lambda x: x.split())
movies['tagline'] = movies['tagline'].apply(lambda x: x.split())
# The steps that build the 'tag' column were elided in this extract; a plausible
# reconstruction from the helper functions defined above:
movies['genres'] = movies['genres'].apply(convert)
movies['keywords'] = movies['keywords'].apply(convert)
movies['cast'] = movies['cast'].apply(convert2)
movies['crew'] = movies['crew'].apply(convert3)
movies['cast'] = movies['cast'].apply(remove_space)
movies['crew'] = movies['crew'].apply(remove_space)
movies['tag'] = movies['overview'] + movies['tagline'] + movies['genres'] + movies['keywords'] + movies['cast'] + movies['crew']
movies['tag'] = movies['tag'].apply(' '.join)
movies.drop(['overview', 'tagline', 'genres', 'keywords', 'cast', 'crew'], axis=1, inplace=True)
cv = CountVectorizer(max_features=6000, stop_words='english')
vector = cv.fit_transform(movies['tag']).toarray()
from sklearn.metrics.pairwise import cosine_similarity
cosim = cosine_similarity(vector)
def recommend(name):
index = np.where(movies['title'] == name)[0][0]
    similar_items = sorted(list(enumerate(cosim[index])), key=lambda x: x[1], reverse=True)[1:6]  # skip position 0, which is the query movie itself
data = []
for i in similar_items:
v = movies[movies.index == i[0]]['title']
v = list(v)
data.append(v)
return pd.DataFrame(data)
recommend('Avatar') | code |
106212034/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
movies = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv')
credit = pd.read_csv('/kaggle/input/tmdb-movie-metadata/tmdb_5000_credits.csv')
movies = movies[['id', 'title', 'overview', 'tagline', 'genres', 'keywords']]
movies = movies.merge(credit, on='title')
movies.drop(['movie_id'], axis=1, inplace=True)
movies.dropna(inplace=True)
movies.isnull().sum() | code |
329711/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
def munge_data(df):
"""fill in missing values and convert characters to numerical"""
df['Sex'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
median_ages[i, j] = df[(df['Sex'] == i) & (df['Pclass'] == j + 1)]['Age'].dropna().median()
for i in range(0, 2):
for j in range(0, 3):
df.loc[df.Age.isnull() & (df.Sex == i) & (df.Pclass == j + 1), 'Age'] = median_ages[i, j]
    df['Embarked'] = df['Embarked'].dropna().map({'C': 1, 'S': 2, 'Q': 3}).astype(int)
    mode = int(df['Embarked'].dropna().mode().iloc[0])  # take the scalar mode; fillna with a Series only fills matching index labels
    df['Embarked'] = df['Embarked'].fillna(mode)
df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
return df.fillna(0)
train_df = pd.read_csv('../input/train.csv', header=0)
test_df = pd.read_csv('../input/test.csv', header=0)
ids = test_df['PassengerId'].values
train_df = munge_data(train_df)
test_df = munge_data(test_df)
train_data = train_df.values
test_data = test_df.values
rf = RandomForestClassifier(n_estimators=100)
rf = rf.fit(train_data[0:, 1:], train_data[0:, 0])
f, ax = plt.subplots(figsize=(10, 4))
bar_placements = range(len(rf.feature_importances_))
ax.bar(bar_placements, rf.feature_importances_)
ax.set_title('Feature Importances')
ax.set_xticks([tick + 0.5 for tick in bar_placements])
ax.set_xticklabels(train_df.columns[1:])
f.show() | code |
329711/cell_2 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
import csv
def munge_data(df):
"""fill in missing values and convert characters to numerical"""
df['Sex'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
median_ages[i, j] = df[(df['Sex'] == i) & (df['Pclass'] == j + 1)]['Age'].dropna().median()
for i in range(0, 2):
for j in range(0, 3):
df.loc[df.Age.isnull() & (df.Sex == i) & (df.Pclass == j + 1), 'Age'] = median_ages[i, j]
    df['Embarked'] = df['Embarked'].dropna().map({'C': 1, 'S': 2, 'Q': 3}).astype(int)
    mode = int(df['Embarked'].dropna().mode().iloc[0])  # take the scalar mode; fillna with a Series only fills matching index labels
    df['Embarked'] = df['Embarked'].fillna(mode)
df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
return df.fillna(0)
train_df = pd.read_csv('../input/train.csv', header=0)
test_df = pd.read_csv('../input/test.csv', header=0)
ids = test_df['PassengerId'].values
train_df = munge_data(train_df)
test_df = munge_data(test_df)
train_data = train_df.values
test_data = test_df.values
print('Training...')
rf = RandomForestClassifier(n_estimators=100)
rf = rf.fit(train_data[0:, 1:], train_data[0:, 0])
print('Accuracy = ', (rf.predict(train_data[0:, 1:]) == train_data[0:, 0]).mean()) | code |
329711/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import cross_val_score  # sklearn.cross_validation was removed in modern scikit-learn
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
def munge_data(df):
"""fill in missing values and convert characters to numerical"""
df['Sex'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
median_ages[i, j] = df[(df['Sex'] == i) & (df['Pclass'] == j + 1)]['Age'].dropna().median()
for i in range(0, 2):
for j in range(0, 3):
df.loc[df.Age.isnull() & (df.Sex == i) & (df.Pclass == j + 1), 'Age'] = median_ages[i, j]
    df['Embarked'] = df['Embarked'].dropna().map({'C': 1, 'S': 2, 'Q': 3}).astype(int)
    mode = int(df['Embarked'].dropna().mode().iloc[0])  # take the scalar mode; fillna with a Series only fills matching index labels
    df['Embarked'] = df['Embarked'].fillna(mode)
df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
return df.fillna(0)
train_df = pd.read_csv('../input/train.csv', header=0)
test_df = pd.read_csv('../input/test.csv', header=0)
ids = test_df['PassengerId'].values
train_df = munge_data(train_df)
test_df = munge_data(test_df)
train_data = train_df.values
test_data = test_df.values
rf = RandomForestClassifier(n_estimators=100)
rf = rf.fit(train_data[0:, 1:], train_data[0:, 0])
f, ax = plt.subplots(figsize=(10, 4))
bar_placements = range(len(rf.feature_importances_))
ax.bar(bar_placements, rf.feature_importances_)
ax.set_title('Feature Importances')
ax.set_xticks([tick + 0.5 for tick in bar_placements])
ax.set_xticklabels(train_df.columns[1:])
f.show()
scores = cross_val_score(rf, train_data[0:, 1:], train_data[0:, 0])
print(scores.mean()) | code |
90129873/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data processing, CSV file I/O (e.g. pd.read_csv)
krenth311 = pd.read_csv('../input/dataset/krenth311.csv')
krenth316 = pd.read_csv('../input/dataset/krenth316.csv')
merge = pd.concat([krenth311, krenth316])
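# pd.concat stacks the two survey files row-wise; index=False below keeps
# the (now non-unique) row labels out of the merged CSV.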
merge.to_csv('merge.csv', index=False)
for col in ['aloneorinagroup']:
krenth311[col].value_counts(ascending=True).plot(kind='barh', title=col)
plt.xlabel('frequency')
plt.show() | code |
90129873/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import cufflinks as cf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import dates as md
import seaborn as sns
import plotly.graph_objs as go
import plotly
cf.set_config_file(offline=True)
import os | code |
17108074/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
sku_category_filepath = '../input/sku-category/sku_category.csv'
sku_category = pd.read_csv(sku_category_filepath, sep=None, decimal=',', engine='python')
sku_category.drop('compare', axis=1, inplace=True)
sku_category.drop('sector', axis=1, inplace=True)
sku = sku_category.copy()
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sku_price_filepath = '../input/sku-price/{}.csv'.format(date)
sku_price = pd.read_csv(sku_price_filepath, sep=None, decimal=',', engine='python')
sku_price.columns = ['sku', 'price-{}'.format(date)]
sku_sold_filepath = '../input/sku-unitssold/{}.csv'.format(date)
sku_sold = pd.read_csv(sku_sold_filepath, sep=None, decimal=',', engine='python')
sku_sold.columns = ['sku', 'sold-{}'.format(date)]
sku_price['sku'] = sku_price['sku'].astype(str)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_price['sku'] = pd.to_numeric(sku_price['sku'])
sku_sold['sku'] = sku_sold['sku'].astype(str)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold['sku'] = pd.to_numeric(sku_sold['sku'])
sku = pd.merge(sku, sku_price, on='sku', how='left')
sku = pd.merge(sku, sku_sold, on='sku', how='left')
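# Left joins keep every SKU from the category table; SKUs with no activity
# on a given day simply get NaN in that day's price and sold columns.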
sku.head() | code |
17108074/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
sku_category_filepath = '../input/sku-category/sku_category.csv'
sku_category = pd.read_csv(sku_category_filepath, sep=None, decimal=',', engine='python')
sku_category.drop('compare', axis=1, inplace=True)
sku_category.drop('sector', axis=1, inplace=True)
sku = sku_category.copy()
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sku_price_filepath = '../input/sku-price/{}.csv'.format(date)
sku_price = pd.read_csv(sku_price_filepath, sep=None, decimal=',', engine='python')
sku_price.columns = ['sku', 'price-{}'.format(date)]
sku_sold_filepath = '../input/sku-unitssold/{}.csv'.format(date)
sku_sold = pd.read_csv(sku_sold_filepath, sep=None, decimal=',', engine='python')
sku_sold.columns = ['sku', 'sold-{}'.format(date)]
sku_price['sku'] = sku_price['sku'].astype(str)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_price['sku'] = pd.to_numeric(sku_price['sku'])
sku_sold['sku'] = sku_sold['sku'].astype(str)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold['sku'] = pd.to_numeric(sku_sold['sku'])
sku = pd.merge(sku, sku_price, on='sku', how='left')
sku = pd.merge(sku, sku_sold, on='sku', how='left')
sku.sort_values(by='sold-2017-03-01').head(20)
sumProductsSold = 0
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sumProductsSold += sku['price-{}'.format(date)].count()
sku_numberSold = sku.copy()
columnsToDrop = [x for x in sku.columns if x != 'sku']
sku_numberSold.drop(columnsToDrop, axis=1, inplace=True)
sku_numberSold.insert(1, 'avgNumberSold', 0.0)
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
for index in sku.index:
number_sold = sku.at[index, 'sold-{}'.format(date)]
if number_sold > 0:
sku_numberSold.at[index, 'avgNumberSold'] += number_sold
sku_numberSold['avgNumberSold'] = sku_numberSold['avgNumberSold'] / 31
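# Dividing the accumulated March total by 31 turns it into a mean number of
# units sold per day, with zero-sale days counted as zeros.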
sku_numberSold.sort_values('avgNumberSold', ascending=False).head(20) | code |
17108074/cell_8 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
sku_category_filepath = '../input/sku-category/sku_category.csv'
sku_category = pd.read_csv(sku_category_filepath, sep=None, decimal=',', engine='python')
sku_category.drop('compare', axis=1, inplace=True)
sku_category.drop('sector', axis=1, inplace=True)
sku = sku_category.copy()
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sku_price_filepath = '../input/sku-price/{}.csv'.format(date)
sku_price = pd.read_csv(sku_price_filepath, sep=None, decimal=',', engine='python')
sku_price.columns = ['sku', 'price-{}'.format(date)]
sku_sold_filepath = '../input/sku-unitssold/{}.csv'.format(date)
sku_sold = pd.read_csv(sku_sold_filepath, sep=None, decimal=',', engine='python')
sku_sold.columns = ['sku', 'sold-{}'.format(date)]
sku_price['sku'] = sku_price['sku'].astype(str)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_price['sku'] = pd.to_numeric(sku_price['sku'])
sku_sold['sku'] = sku_sold['sku'].astype(str)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold['sku'] = pd.to_numeric(sku_sold['sku'])
sku = pd.merge(sku, sku_price, on='sku', how='left')
sku = pd.merge(sku, sku_sold, on='sku', how='left')
sku.sort_values(by='sold-2017-03-01').head(20)
sumProductsSold = 0
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sumProductsSold += sku['price-{}'.format(date)].count()
sku_numberSold = sku.copy()
columnsToDrop = [x for x in sku.columns if x != 'sku']
sku_numberSold.drop(columnsToDrop, axis=1, inplace=True)
sku_numberSold.insert(1, 'avgNumberSold', 0.0)
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
for index in sku.index:
number_sold = sku.at[index, 'sold-{}'.format(date)]
if number_sold > 0:
sku_numberSold.at[index, 'avgNumberSold'] += number_sold
sku_numberSold['avgNumberSold'] = sku_numberSold['avgNumberSold'] / 31
sku_numberSold.sort_values('avgNumberSold', ascending=False).head(20)
sku_probability = sku_numberSold.copy()
sku_probability.rename(columns={'avgNumberSold': 'probability'}, inplace=True)
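# If a SKU's daily sales are modeled as Poisson with rate avgNumberSold,
# then 1 - exp(-rate) below is the probability of at least one sale in a day.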
for index in sku_probability.index:
probability = 1 - np.exp(-sku_probability.at[index, 'probability'])
sku_probability.at[index, 'probability'] = probability
sku_probability.sort_values(by='probability', ascending=False).head(20) | code |
17108074/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
sku_category_filepath = '../input/sku-category/sku_category.csv'
sku_category = pd.read_csv(sku_category_filepath, sep=None, decimal=',', engine='python')
sku_category.drop('compare', axis=1, inplace=True)
sku_category.drop('sector', axis=1, inplace=True)
sku = sku_category.copy()
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sku_price_filepath = '../input/sku-price/{}.csv'.format(date)
sku_price = pd.read_csv(sku_price_filepath, sep=None, decimal=',', engine='python')
sku_price.columns = ['sku', 'price-{}'.format(date)]
sku_sold_filepath = '../input/sku-unitssold/{}.csv'.format(date)
sku_sold = pd.read_csv(sku_sold_filepath, sep=None, decimal=',', engine='python')
sku_sold.columns = ['sku', 'sold-{}'.format(date)]
sku_price['sku'] = sku_price['sku'].astype(str)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_price['sku'] = pd.to_numeric(sku_price['sku'])
sku_sold['sku'] = sku_sold['sku'].astype(str)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold['sku'] = pd.to_numeric(sku_sold['sku'])
sku = pd.merge(sku, sku_price, on='sku', how='left')
sku = pd.merge(sku, sku_sold, on='sku', how='left')
sku.sort_values(by='sold-2017-03-01').head(20) | code |
17108074/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
sku_category_filepath = '../input/sku-category/sku_category.csv'
sku_category = pd.read_csv(sku_category_filepath, sep=None, decimal=',', engine='python')
sku_category.drop('compare', axis=1, inplace=True)
sku_category.drop('sector', axis=1, inplace=True)
sku = sku_category.copy()
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
sku_price_filepath = '../input/sku-price/{}.csv'.format(date)
sku_price = pd.read_csv(sku_price_filepath, sep=None, decimal=',', engine='python')
sku_price.columns = ['sku', 'price-{}'.format(date)]
sku_sold_filepath = '../input/sku-unitssold/{}.csv'.format(date)
sku_sold = pd.read_csv(sku_sold_filepath, sep=None, decimal=',', engine='python')
sku_sold.columns = ['sku', 'sold-{}'.format(date)]
sku_price['sku'] = sku_price['sku'].astype(str)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_price.drop(sku_price[sku_price['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_price['sku'] = pd.to_numeric(sku_price['sku'])
sku_sold['sku'] = sku_sold['sku'].astype(str)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold.drop(sku_sold[sku_sold['sku'] == 'S080501_1500_30_EUR'].index.values, axis=0, inplace=True)
sku_sold['sku'] = pd.to_numeric(sku_sold['sku'])
sku = pd.merge(sku, sku_price, on='sku', how='left')
sku = pd.merge(sku, sku_sold, on='sku', how='left')
sku.sort_values(by='sold-2017-03-01').head(20)
print('We have {} rows (products) in our table'.format(sku.shape[0]))
sumProductsSold = 0
for i in range(1, 32):
date = '2017-03-{:02d}'.format(i)
if sku['price-{}'.format(date)].count() != sku['sold-{}'.format(date)].count():
print('The price and sold columns do not have the same number of entries for {}'.format(date))
print('{}: {} products sold'.format(date, sku['price-{}'.format(date)].count()))
sumProductsSold += sku['price-{}'.format(date)].count()
print('At maximum, out of the {} products, {} were sold in the given data'.format(sku.shape[0], sumProductsSold)) | code |
32068625/cell_6 | [
"text_html_output_1.png"
] | import networkx as nx
import plotly.graph_objects as go
import sys
node_list = list(['Chloroquine phosphate', 'Spike (S) antibody', 'IL-6 antibody', 'Remdesivir', 'Favipiravir', 'Fluorouracil', 'Ribavirin', 'Acyclovir', 'Ritonavir', 'Lopinavir', 'Kaletra', 'Darunavir', 'Arbidol', 'Hydroxychloroquine', 'Oseltamivir'])
G = nx.Graph()
for i in node_list:
G.add_node(i)
G.add_edges_from([('Spike (S) antibody', 'IL-6 antibody')])
G.add_edges_from([('Remdesivir', 'Favipiravir')])
G.add_edges_from([('Remdesivir', 'Fluorouracil')])
G.add_edges_from([('Remdesivir', 'Ribavirin')])
G.add_edges_from([('Remdesivir', 'Acyclovir')])
G.add_edges_from([('Fluorouracil', 'Favipiravir')])
G.add_edges_from([('Ribavirin', 'Favipiravir')])
G.add_edges_from([('Acyclovir', 'Favipiravir')])
G.add_edges_from([('Fluorouracil', 'Ribavirin')])
G.add_edges_from([('Fluorouracil', 'Acyclovir')])
G.add_edges_from([('Ribavirin', 'Acyclovir')])
G.add_edges_from([('Ritonavir', 'Lopinavir')])
G.add_edges_from([('Ritonavir', 'Kaletra')])
G.add_edges_from([('Ritonavir', 'Darunavir')])
G.add_edges_from([('Lopinavir', 'Kaletra')])
G.add_edges_from([('Lopinavir', 'Darunavir')])
G.add_edges_from([('Kaletra', 'Darunavir')])
G.add_edges_from([('Arbidol', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Arbidol')])
pos = nx.spring_layout(G, k=0.5, iterations=50)
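# k sets the target distance between nodes in the spring (Fruchterman-
# Reingold) layout; larger k spreads nodes apart, and more iterations give
# the layout longer to settle.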
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Groups of drugs in clinical trials by working mechanisms', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=100, l=100, r=100, t=100), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
fig.show() | code |
32068625/cell_7 | [
"text_html_output_2.png"
] | import networkx as nx
import numpy as np
import plotly.graph_objects as go
import sys
node_list = list(['Chloroquine phosphate', 'Spike (S) antibody', 'IL-6 antibody', 'Remdesivir', 'Favipiravir', 'Fluorouracil', 'Ribavirin', 'Acyclovir', 'Ritonavir', 'Lopinavir', 'Kaletra', 'Darunavir', 'Arbidol', 'Hydroxychloroquine', 'Oseltamivir'])
G = nx.Graph()
for i in node_list:
G.add_node(i)
G.add_edges_from([('Spike (S) antibody', 'IL-6 antibody')])
G.add_edges_from([('Remdesivir', 'Favipiravir')])
G.add_edges_from([('Remdesivir', 'Fluorouracil')])
G.add_edges_from([('Remdesivir', 'Ribavirin')])
G.add_edges_from([('Remdesivir', 'Acyclovir')])
G.add_edges_from([('Fluorouracil', 'Favipiravir')])
G.add_edges_from([('Ribavirin', 'Favipiravir')])
G.add_edges_from([('Acyclovir', 'Favipiravir')])
G.add_edges_from([('Fluorouracil', 'Ribavirin')])
G.add_edges_from([('Fluorouracil', 'Acyclovir')])
G.add_edges_from([('Ribavirin', 'Acyclovir')])
G.add_edges_from([('Ritonavir', 'Lopinavir')])
G.add_edges_from([('Ritonavir', 'Kaletra')])
G.add_edges_from([('Ritonavir', 'Darunavir')])
G.add_edges_from([('Lopinavir', 'Kaletra')])
G.add_edges_from([('Lopinavir', 'Darunavir')])
G.add_edges_from([('Kaletra', 'Darunavir')])
G.add_edges_from([('Arbidol', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Arbidol')])
pos = nx.spring_layout(G, k=0.5, iterations=50)
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Groups of drugs in clinical trials by working mechanisms', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=100, l=100, r=100, t=100), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
import numpy as np
exist = {}
LIST = open('../input/drugdata/sorted_alresult.coronavirus', 'r')
for line in LIST:
# str.replace does not interpret regular expressions, so the original
# '\s\s+' replace was a no-op; split on any run of whitespace instead.
line = line.strip()
table = line.split()
if float(table[0]) > 4:
exist[table[1]] = 0
REF = open('../input/drugdata/combine_drug_name_id.csv', 'r')
DATA = open('../input/drugdata/combined_fp2_data.csv', 'r')
drug = []
all_drug = {}
for ref in REF:
ref = ref.strip()
rrr = ref.split(',')
if rrr[1].lower() in exist:
drug.append(rrr[1])
data = DATA.readline()
data = data.strip()
data = data.split(',')
# The original conversion loop never incremented its index (kkk + 1 has no
# effect), so only data[0] was converted; convert the whole vector at once.
# np.float was removed in NumPy 1.24, so use an explicit float64 dtype.
all_drug[rrr[1]] = np.asarray(data, dtype=np.float64)
REF.close()
DATA.close()
connections1 = []
connections2 = []
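# Link two drugs when the Pearson correlation between their fingerprint
# vectors exceeds 0.35; the cutoff is this notebook's choice of a rough
# structural-similarity threshold, not a standard value.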
for drug1 in drug:
for drug2 in drug:
if drug1 < drug2:
cor = np.corrcoef(all_drug[drug1], all_drug[drug2])
if cor[0, 1] > 0.35:
connections1.append(drug1)
connections2.append(drug2)
import sys
import plotly.graph_objects as go
import networkx as nx
node_list = list(all_drug.keys())
G = nx.Graph()
for i in node_list:
G.add_node(i)
i = 0
for drug1 in connections1:
drug2 = connections2[i]
G.add_edges_from([(drug1, drug2)])
i = i + 1
pos = nx.spring_layout(G, k=0.5, iterations=50)
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Similarity of chemical structures among the drugs that are related to coronavirus in literature', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=50, l=100, r=100, t=50), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
fig.show() | code |
32068625/cell_8 | [
"text_html_output_1.png"
] | import networkx as nx
import numpy as np
import plotly.graph_objects as go
import sys
node_list = list(['Chloroquine phosphate', 'Spike (S) antibody', 'IL-6 antibody', 'Remdesivir', 'Favipiravir', 'Fluorouracil', 'Ribavirin', 'Acyclovir', 'Ritonavir', 'Lopinavir', 'Kaletra', 'Darunavir', 'Arbidol', 'Hydroxychloroquine', 'Oseltamivir'])
G = nx.Graph()
for i in node_list:
G.add_node(i)
G.add_edges_from([('Spike (S) antibody', 'IL-6 antibody')])
G.add_edges_from([('Remdesivir', 'Favipiravir')])
G.add_edges_from([('Remdesivir', 'Fluorouracil')])
G.add_edges_from([('Remdesivir', 'Ribavirin')])
G.add_edges_from([('Remdesivir', 'Acyclovir')])
G.add_edges_from([('Fluorouracil', 'Favipiravir')])
G.add_edges_from([('Ribavirin', 'Favipiravir')])
G.add_edges_from([('Acyclovir', 'Favipiravir')])
G.add_edges_from([('Fluorouracil', 'Ribavirin')])
G.add_edges_from([('Fluorouracil', 'Acyclovir')])
G.add_edges_from([('Ribavirin', 'Acyclovir')])
G.add_edges_from([('Ritonavir', 'Lopinavir')])
G.add_edges_from([('Ritonavir', 'Kaletra')])
G.add_edges_from([('Ritonavir', 'Darunavir')])
G.add_edges_from([('Lopinavir', 'Kaletra')])
G.add_edges_from([('Lopinavir', 'Darunavir')])
G.add_edges_from([('Kaletra', 'Darunavir')])
G.add_edges_from([('Arbidol', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Hydroxychloroquine')])
G.add_edges_from([('Chloroquine phosphate', 'Arbidol')])
pos = nx.spring_layout(G, k=0.5, iterations=50)
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Groups of drugs in clinical trials by working mechanisms', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=100, l=100, r=100, t=100), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
import numpy as np
exist = {}
LIST = open('../input/drugdata/sorted_alresult.coronavirus', 'r')
for line in LIST:
# str.replace does not interpret regular expressions, so the original
# '\s\s+' replace was a no-op; split on any run of whitespace instead.
line = line.strip()
table = line.split()
if float(table[0]) > 4:
exist[table[1]] = 0
REF = open('../input/drugdata/combine_drug_name_id.csv', 'r')
DATA = open('../input/drugdata/combined_fp2_data.csv', 'r')
drug = []
all_drug = {}
for ref in REF:
ref = ref.strip()
rrr = ref.split(',')
if rrr[1].lower() in exist:
drug.append(rrr[1])
data = DATA.readline()
data = data.strip()
data = data.split(',')
# The original conversion loop never incremented its index (kkk + 1 has no
# effect), so only data[0] was converted; convert the whole vector at once.
# np.float was removed in NumPy 1.24, so use an explicit float64 dtype.
all_drug[rrr[1]] = np.asarray(data, dtype=np.float64)
REF.close()
DATA.close()
connections1 = []
connections2 = []
for drug1 in drug:
for drug2 in drug:
if drug1 < drug2:
cor = np.corrcoef(all_drug[drug1], all_drug[drug2])
if cor[0, 1] > 0.35:
connections1.append(drug1)
connections2.append(drug2)
import sys
import plotly.graph_objects as go
import networkx as nx
node_list = list(all_drug.keys())
G = nx.Graph()
for i in node_list:
G.add_node(i)
i = 0
for drug1 in connections1:
drug2 = connections2[i]
G.add_edges_from([(drug1, drug2)])
i = i + 1
pos = nx.spring_layout(G, k=0.5, iterations=50)
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Similarity of chemical structures among the drugs that are related to coronavirus in literature', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=50, l=100, r=100, t=50), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
import numpy as np
exist = {}
LIST = open('../input/drugdata/sorted_alresult.covid19', 'r')
for line in LIST:
# Same fix as above: split on any whitespace run instead of the no-op
# str.replace of a regex pattern.
line = line.strip()
table = line.split()
if float(table[0]) > 0:
exist[table[1]] = 0
REF = open('../input/drugdata/combine_drug_name_id.csv', 'r')
DATA = open('../input/drugdata/combined_fp2_data.csv', 'r')
drug = []
all_drug = {}
for ref in REF:
ref = ref.strip()
rrr = ref.split(',')
if rrr[1].lower() in exist:
drug.append(rrr[1])
data = DATA.readline()
data = data.strip()
data = data.split(',')
# Same fix as above: the index counter was never incremented and np.float
# is gone from modern NumPy.
all_drug[rrr[1]] = np.asarray(data, dtype=np.float64)
REF.close()
DATA.close()
connections1 = []
connections2 = []
for drug1 in drug:
for drug2 in drug:
if drug1 < drug2:
cor = np.corrcoef(all_drug[drug1], all_drug[drug2])
if cor[0, 1] > 0.35:
connections1.append(drug1)
connections2.append(drug2)
import sys
import plotly.graph_objects as go
import networkx as nx
node_list = list(all_drug.keys())
G = nx.Graph()
for i in node_list:
G.add_node(i)
i = 0
for drug1 in connections1:
drug2 = connections2[i]
G.add_edges_from([(drug1, drug2)])
i = i + 1
pos = nx.spring_layout(G, k=0.5, iterations=50)
for n, p in pos.items():
G.nodes[n]['pos'] = p
edge_trace = go.Scatter(x=[], y=[], line=dict(width=1, color='#888'), hoverinfo='none', mode='lines')
for edge in G.edges():
x0, y0 = G.nodes[edge[0]]['pos']
x1, y1 = G.nodes[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
node_trace = go.Scatter(x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict(showscale=True, colorscale='RdBu', reversescale=True, color=[], size=15, colorbar=dict(thickness=5, xanchor='left', titleside='right'), line=dict(width=0)))
for node in G.nodes():
x, y = G.nodes[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
node_info = adjacencies[0]
node_trace['text'] += tuple([node_info])
fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout(title='Similarity of chemical structures among the drugs that are related to COVID-19 in literature', titlefont=dict(size=12), showlegend=False, hovermode='closest', margin=dict(b=50, l=100, r=100, t=50), annotations=[dict(text='', showarrow=False, xref='paper', yref='paper')], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
fig.show() | code |
16113855/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/champs-scalar-coupling/train.csv')
test = pd.read_csv('../input/champs-scalar-coupling/test.csv')
sub = pd.read_csv('../input/champs-scalar-coupling/sample_submission.csv')
train_dist = pd.read_csv('../input/distance-features/train_dist.csv')
test_dist = pd.read_csv('../input/distance-features/test_dist.csv')
train = pd.merge(train.drop(['atom_index_0', 'atom_index_1', 'type'], axis=1), train_dist, how='left', on='id')
test = pd.merge(test.drop(['atom_index_0', 'atom_index_1', 'type'], axis=1), test_dist, how='left', on='id')
del train_dist, test_dist
train_dipole_moment = pd.read_csv('../input/imputing-molecular-features/train_dipole_moment.csv')
test_dipole_moment = pd.read_csv('../input/imputing-molecular-features/test_dipole_moment.csv')
train = pd.merge(train, train_dipole_moment, how='left', on='molecule_name')
test = pd.merge(test, test_dipole_moment, how='left', on='molecule_name')
train_potential_energy = pd.read_csv('../input/imputing-molecular-features/train_potential_energy.csv')
test_potential_energy = pd.read_csv('../input/imputing-molecular-features/test_potential_energy.csv')
train = pd.merge(train, train_potential_energy, how='left', on='molecule_name')
test = pd.merge(test, test_potential_energy, how='left', on='molecule_name')
train_ob_charges = pd.read_csv('../input/v7-estimation-of-mulliken-charges-with-open-babel/train_ob_charges.csv')
test_ob_charges = pd.read_csv('../input/v7-estimation-of-mulliken-charges-with-open-babel/test_ob_charges.csv')
train = pd.merge(train, train_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem0'}, axis=1)
train = pd.merge(train, train_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem1'}, axis=1)
test = pd.merge(test, test_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem0'}, axis=1)
test = pd.merge(test, test_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem1'}, axis=1)
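# Each coupling pair now carries the Open Babel EEM partial-charge estimate
# for both of its atoms, merged in once per atom index as eem0 and eem1.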
# https://www.kaggle.com/artgor/artgor-utils
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
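# Caveat: float16 carries only about 3 significant decimal digits, so this
# downcast can be lossy for columns that need fine-grained precision.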
train = reduce_mem_usage(train)
test = reduce_mem_usage(test) | code |
16113855/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
print(os.listdir('../input')) | code |
16113855/cell_15 | [
"text_plain_output_1.png"
] | from numpy.random import permutation
import lightgbm
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/champs-scalar-coupling/train.csv')
test = pd.read_csv('../input/champs-scalar-coupling/test.csv')
sub = pd.read_csv('../input/champs-scalar-coupling/sample_submission.csv')
train_dist = pd.read_csv('../input/distance-features/train_dist.csv')
test_dist = pd.read_csv('../input/distance-features/test_dist.csv')
train = pd.merge(train.drop(['atom_index_0', 'atom_index_1', 'type'], axis=1), train_dist, how='left', on='id')
test = pd.merge(test.drop(['atom_index_0', 'atom_index_1', 'type'], axis=1), test_dist, how='left', on='id')
del train_dist, test_dist
train_dipole_moment = pd.read_csv('../input/imputing-molecular-features/train_dipole_moment.csv')
test_dipole_moment = pd.read_csv('../input/imputing-molecular-features/test_dipole_moment.csv')
train = pd.merge(train, train_dipole_moment, how='left', on='molecule_name')
test = pd.merge(test, test_dipole_moment, how='left', on='molecule_name')
train_potential_energy = pd.read_csv('../input/imputing-molecular-features/train_potential_energy.csv')
test_potential_energy = pd.read_csv('../input/imputing-molecular-features/test_potential_energy.csv')
train = pd.merge(train, train_potential_energy, how='left', on='molecule_name')
test = pd.merge(test, test_potential_energy, how='left', on='molecule_name')
train_ob_charges = pd.read_csv('../input/v7-estimation-of-mulliken-charges-with-open-babel/train_ob_charges.csv')
test_ob_charges = pd.read_csv('../input/v7-estimation-of-mulliken-charges-with-open-babel/test_ob_charges.csv')
train = pd.merge(train, train_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem0'}, axis=1)
train = pd.merge(train, train_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem1'}, axis=1)
test = pd.merge(test, test_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem0'}, axis=1)
test = pd.merge(test, test_ob_charges[['molecule_name', 'atom_index', 'eem']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'eem': 'eem1'}, axis=1)
# https://www.kaggle.com/artgor/artgor-utils
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
train = reduce_mem_usage(train)
test = reduce_mem_usage(test)
pred_vars = [v for v in train.columns if v not in ['id', 'molecule_name', 'scalar_coupling_constant', 'atom_index_x', 'atom_index_y']]
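# Hold out validation data by molecule, not by row: every coupling pair from
# a given molecule lands on the same side of the split, so the validation
# score is not inflated by leakage between near-identical rows.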
molecule_names = pd.DataFrame(permutation(train['molecule_name'].unique()), columns=['molecule_name'])
nm = molecule_names.shape[0]
ntrn = int(0.9 * nm)
nval = int(0.1 * nm)
tmp_train = pd.merge(train, molecule_names[0:ntrn], how='right', on='molecule_name')
tmp_val = pd.merge(train, molecule_names[ntrn:nm], how='right', on='molecule_name')
X_train = tmp_train[pred_vars]
X_val = tmp_val[pred_vars]
y_train = tmp_train['scalar_coupling_constant']
y_val = tmp_val['scalar_coupling_constant']
del tmp_train, tmp_val
params = {'objective': 'regression_l1', 'learning_rate': 0.1, 'num_leaves': 1023, 'num_threads': -1, 'bagging_fraction': 0.5, 'bagging_freq': 1, 'feature_fraction': 0.9, 'lambda_l1': 10.0, 'max_bin': 255, 'min_child_samples': 15}
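# regression_l1 optimizes mean absolute error, which is a closer match to
# the per-type log-MAE style metric used for scalar coupling than squared
# error would be.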
cat_feats = ['type', 'type_0', 'type_1', 'atom_0l', 'atom_0r', 'atom_1l', 'atom_1r']
train_data = lightgbm.Dataset(X_train, label=y_train, categorical_feature=cat_feats)
val_data = lightgbm.Dataset(X_val, label=y_val, categorical_feature=cat_feats)
# verbose_eval and early_stopping_rounds were removed from lightgbm.train()
# in LightGBM 4.x; the callback API below is the equivalent.
model = lightgbm.train(params, train_data, valid_sets=[train_data, val_data], num_boost_round=4000, callbacks=[lightgbm.early_stopping(stopping_rounds=100), lightgbm.log_evaluation(period=500)])
128045262/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes
df['LOCATION START DATE'].nunique() | code |
128045262/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns | code |
128045262/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes | code |
128045262/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10) | code |
128045262/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes
df['LOCATION END DATE'].nunique() | code |
128045262/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes
df['LOCATION'].nunique() | code |
128045262/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes
df['CITY'].nunique() | code |
128045262/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
df.info() | code |
128045262/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from IPython.display import Image
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
df = pd.read_csv('/kaggle/input/listing-of-business/Listing of Active Businesses.csv')
df.sample(10)
df.columns
dtypes = pd.DataFrame(df.dtypes, columns=['DataTypes'])
dtypes
print('Shape of the Dataset is {} Rows and {} Columns.'.format(len(df), len(df.columns))) | code |
129014537/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
# The file paths were crossed: train should load train.csv, test should load
# test.csv, and the sample submission belongs in the submission frame.
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print(f'\x1b[94mNumber of rows in train data: {train.shape[0]}')
print(f'\x1b[94mNumber of columns in train data: {train.shape[1]}')
print(f'\x1b[94mNumber of values in train data: {train.count().sum()}')
print(f'\x1b[94mNumber of missing values in train data: {sum(train.isna().sum())}') | code