path | screenshot_names | code | cell_type |
---|---|---|---|
2021796/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.fillna('unknown', inplace=True)
test_df.fillna('unknown', inplace=True)
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
X = train_df.comment_text
test_X = test_df.comment_text
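# TF-IDF over word unigrams and bigrams; terms appearing in fewer than 3 documents or in more than 90% of documents are dropped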
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1)
tfidf_vec.fit(X)
train_tfidf = tfidf_vec.transform(X)
test_tfidf = tfidf_vec.transform(test_X)
folds = KFold(n_splits=5, shuffle=True, random_state=7)
pred_test = np.zeros((len(test_X), len(label_cols)))
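# Fit one binary logistic regression per toxicity label with 5-fold CV, accumulating test-set probabilities across folds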
for i, t in enumerate(label_cols):
print(t)
y = train_df.loc[:, [t]].values.reshape(-1)
for train_idx, test_idx in folds.split(train_tfidf):
xtr = train_tfidf[train_idx]
ytr = y[train_idx]
xval = train_tfidf[test_idx]
yval = y[test_idx]
model = LogisticRegression(C=9.0)
model.fit(xtr, ytr)
pred_train = model.predict_proba(xtr)
loss_train = log_loss(ytr, pred_train)
pred_val = model.predict_proba(xval)
loss_val = log_loss(yval, pred_val)
pred_test[:, i] += model.predict_proba(test_tfidf)[:, 1]
print('train loss:', loss_train, 'val loss:', loss_val) | code |
128044990/cell_34 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
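# Build a per-column summary: row count, unique values, % unique, null count, and dtype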
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
combo[-combo.duplicated(subset=feature_set_2)].loc[:, feature_set_2] | code |
128044990/cell_23 | [
"text_html_output_1.png"
] | from scipy.cluster.hierarchy import dendrogram, ward
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
def distance(data, label=''):
distances = data.corr()
dist_linkage = ward(distances)
dendro = dendrogram(dist_linkage, labels=data.columns, leaf_rotation=90)
def find_duplicates(data, column, label=''):
pass
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
feature_set_4 = ['andrena', 'osmia']
orig_train[-orig_train.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
orig_train[-orig_train.duplicated(subset=feature_set_2)].loc[:, feature_set_2]
orig_train[-orig_train.duplicated(subset=feature_set_4)].loc[:, feature_set_4]
find_duplicates(orig_train, feature_set_4, 'original train') | code |
128044990/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
orig_train[-orig_train.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
orig_train[-orig_train.duplicated(subset=feature_set_2)].loc[:, feature_set_2] | code |
128044990/cell_29 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
heatmap(combo, 'Competition') | code |
128044990/cell_39 | [
"text_plain_output_1.png"
] | from scipy.cluster.hierarchy import dendrogram, ward
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
def distance(data, label=''):
distances = data.corr()
dist_linkage = ward(distances)
dendro = dendrogram(dist_linkage, labels=data.columns, leaf_rotation=90)
def find_duplicates(data, column, label=''):
pass
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
combo[-combo.duplicated(subset=feature_set_2)].loc[:, feature_set_2]
find_duplicates(combo, feature_set_3, 'competition') | code |
128044990/cell_18 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
orig_train[-orig_train.duplicated(subset=feature_set_1)].loc[:, feature_set_1] | code |
128044990/cell_32 | [
"text_plain_output_1.png"
] | from scipy.cluster.hierarchy import dendrogram, ward
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
def distance(data, label=''):
distances = data.corr()
dist_linkage = ward(distances)
dendro = dendrogram(dist_linkage, labels=data.columns, leaf_rotation=90)
def find_duplicates(data, column, label=''):
pass
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
find_duplicates(combo, feature_set_1, 'competition') | code |
128044990/cell_3 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100) | code |
128044990/cell_35 | [
"text_plain_output_1.png"
] | from scipy.cluster.hierarchy import dendrogram, ward
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
def distance(data, label=''):
distances = data.corr()
dist_linkage = ward(distances)
dendro = dendrogram(dist_linkage, labels=data.columns, leaf_rotation=90)
def find_duplicates(data, column, label=''):
pass
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
combo[-combo.duplicated(subset=feature_set_2)].loc[:, feature_set_2]
find_duplicates(combo, feature_set_2, 'competition') | code |
128044990/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1] | code |
128044990/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
feature_set_4 = ['andrena', 'osmia']
orig_train[-orig_train.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
orig_train[-orig_train.duplicated(subset=feature_set_2)].loc[:, feature_set_2]
orig_train[-orig_train.duplicated(subset=feature_set_4)].loc[:, feature_set_4] | code |
128044990/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc | code |
128044990/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc | code |
128044990/cell_37 | [
"text_plain_output_1.png"
] | from scipy.cluster.hierarchy import dendrogram, ward
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
def distance(data, label=''):
distances = data.corr()
dist_linkage = ward(distances)
dendro = dendrogram(dist_linkage, labels=data.columns, leaf_rotation=90)
def find_duplicates(data, column, label=''):
pass
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
feature_set_1 = ['RainingDays', 'AverageRainingDays']
feature_set_2 = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
feature_set_3 = ['fruitset', 'fruitmass', 'seeds']
feature_set_4 = ['andrena', 'osmia']
combo = pd.concat([train.drop('yield', axis=1), test])
desc = pd.DataFrame(index=combo.columns)
desc['count'] = len(combo)
desc['nunique'] = combo.nunique()
desc['%unique'] = desc['nunique'] / len(combo) * 100
desc['null'] = combo.isna().sum()
desc['type'] = combo.dtypes
desc
combo[-combo.duplicated(subset=feature_set_1)].loc[:, feature_set_1]
combo[-combo.duplicated(subset=feature_set_2)].loc[:, feature_set_2]
find_duplicates(combo, feature_set_4, 'competition') | code |
128044990/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from scipy.cluster.hierarchy import dendrogram, ward
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
def heatmap(dataset, label=None):
corr = dataset.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
desc = pd.DataFrame(index=orig_train.columns)
desc['count'] = len(orig_train)
desc['nunique'] = orig_train.nunique()
desc['%unique'] = desc['nunique'] / len(orig_train) * 100
desc['null'] = orig_train.isna().sum()
desc['type'] = orig_train.dtypes
desc
heatmap(orig_train, 'Original Train') | code |
104128883/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.info() | code |
104128883/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.describe() | code |
104128883/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.head() | code |
104128883/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104128883/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.duplicated() | code |
104128883/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.duplicated()
insurance.sort_values(by='charges', ascending=1)
Q2 = insurance['charges'].median()
print(Q2) | code |
104128883/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape | code |
104128883/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.duplicated()
plt.figure(figsize=(6, 7))
sns.boxplot(insurance['charges'])
plt.show() | code |
104128883/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.duplicated()
insurance['region'].unique() | code |
104128883/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum()
insurance.duplicated()
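# One-hot encode the categorical columns so all features become numeric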
data_dummies = pd.get_dummies(insurance)
data_dummies.head() | code |
104128883/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
insurance = pd.read_csv('/kaggle/input/insurance/insurance.csv')
insurance.shape
insurance.isna().sum() | code |
89122142/cell_42 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
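# Checkpoint callback: save only the best weights, judged by validation accuracy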
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
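# Feature extraction: frozen EfficientNetB0 backbone with a new pooling + softmax classification head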
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
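# Fine-tuning: unfreeze the base model, then re-freeze everything except its last 5 layers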
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
for layer in base_model.layers:
print(layer.name, layer.trainable) | code |
89122142/cell_25 | [
"image_output_1.png"
] | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
import pathlib
import random
def plot_loss_curves(history):
"""
Returns separate loss curves for training and validation metrics.
Args:
history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
"""
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs = range(len(history.history['loss']))
def compare_historys(original_history, new_history, initial_epochs=5):
"""
Compares two TensorFlow model History objects.
Args:
original_history: History object from original model (before new_history)
new_history: History object from continued model training (after original_history)
initial_epochs: Number of epochs in original_history (new_history plot starts from here)
"""
acc = original_history.history['accuracy']
loss = original_history.history['loss']
val_acc = original_history.history['val_accuracy']
val_loss = original_history.history['val_loss']
total_acc = acc + new_history.history['accuracy']
total_loss = loss + new_history.history['loss']
total_val_acc = val_acc + new_history.history['val_accuracy']
total_val_loss = val_loss + new_history.history['val_loss']
def walk_through_dir(dir_path):
"""
Walks through dir_path returning its contents.
Args:
dir_path (str): target directory
Returns:
A print out of:
number of images (files) in each subdirectory
name of each subdirectory
"""
# Plot Some Random Images
data_dir = pathlib.Path('101_food_classes_10_percent/train')
class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))
def view_rand_img(target_dir):
plt.figure(figsize = (12,8)) # create the figure size
for i in range(12): # loop to show 12 images at a time
ax = plt.subplot(3, 4, i+1) # show the chosen 12 images in a 3 * 4 grid
rand_class = random.choice(class_names) # choose a random class
target_folder = '101_food_classes_10_percent/' + target_dir + "/" + rand_class # create the directory to the images
rand_img = random.sample(os.listdir(target_folder), 12) # choose the 12 images randomly
img = mpimg.imread(target_folder + "/" + rand_img[i]) # read the images
plt.imshow(img) # show the images
plt.title(rand_class) # set title
plt.axis(False) # hide the axis
view_rand_img('train') | code |
89122142/cell_23 | [
"text_plain_output_1.png"
] | import os
def walk_through_dir(dir_path):
"""
Walks through dir_path returning its contents.
Args:
dir_path (str): target directory
Returns:
A print out of:
number of images (files) in each subdirectory
name of each subdirectory
"""
walk_through_dir('101_food_classes_10_percent') | code |
89122142/cell_44 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1]) | code |
89122142/cell_55 | [
"text_plain_output_1.png"
] | import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
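# Collect ground-truth class indices from the unbatched (unshuffled) test dataset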
y_labels = []
for images, labels in test_data.unbatch():
y_labels.append(labels.numpy().argmax())
y_labels[:10] | code |
89122142/cell_39 | [
"image_output_2.png",
"image_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import matplotlib.pyplot as plt
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
def plot_loss_curves(history):
"""
Returns separate loss curves for training and validation metrics.
Args:
history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
"""
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs = range(len(history.history['loss']))
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
plot_loss_curves(history_all_classes_10_percent) | code |
89122142/cell_26 | [
"image_output_1.png"
] | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
import pathlib
import random
def plot_loss_curves(history):
"""
Returns separate loss curves for training and validation metrics.
Args:
history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
"""
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs = range(len(history.history['loss']))
def compare_historys(original_history, new_history, initial_epochs=5):
"""
Compares two TensorFlow model History objects.
Args:
original_history: History object from original model (before new_history)
new_history: History object from continued model training (after original_history)
initial_epochs: Number of epochs in original_history (new_history plot starts from here)
"""
acc = original_history.history['accuracy']
loss = original_history.history['loss']
val_acc = original_history.history['val_accuracy']
val_loss = original_history.history['val_loss']
total_acc = acc + new_history.history['accuracy']
total_loss = loss + new_history.history['loss']
total_val_acc = val_acc + new_history.history['val_accuracy']
total_val_loss = val_loss + new_history.history['val_loss']
def walk_through_dir(dir_path):
"""
Walks through dir_path returning its contents.
Args:
dir_path (str): target directory
Returns:
A print out of:
number of images (files) in each subdirectory
name of each subdirectory
"""
# Plot Some Random Images
data_dir = pathlib.Path('101_food_classes_10_percent/train')
class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))
def view_rand_img(target_dir):
plt.figure(figsize = (12,8)) # create the figure size
for i in range(12): # loop to show 12 images at a time
ax = plt.subplot(3, 4, i+1) # show the chosen 12 images in a 3 * 4 grid
rand_class = random.choice(class_names) # choose a random class
target_folder = '101_food_classes_10_percent/' + target_dir + "/" + rand_class # create the directory to the images
rand_img = random.sample(os.listdir(target_folder), 12) # choose the 12 images randomly
img = mpimg.imread(target_folder + "/" + rand_img[i]) # read the images
plt.imshow(img) # show the images
plt.title(rand_class) # set title
plt.axis(False) # hide the axis
view_rand_img('test') | code |
89122142/cell_48 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
model.save('101_food_classes_10_percent_fine_tuned') | code |
89122142/cell_54 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
model.save('101_food_classes_10_percent_fine_tuned')
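# Predict class probabilities on the test set, then take the argmax as the predicted class index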
pred_probs = model.predict(test_data, verbose=1)
pred_classes = pred_probs.argmax(axis=1)
print(len(pred_classes))
pred_classes[:10] | code |
89122142/cell_50 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
loaded_model = tf.keras.models.load_model('101_food_classes_10_percent_fine_tuned')
loaded_model_results = loaded_model.evaluate(test_data) | code |
89122142/cell_52 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
model.save('101_food_classes_10_percent_fine_tuned')
pred_probs = model.predict(test_data, verbose=1) | code |
89122142/cell_45 | [
"image_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import matplotlib.pyplot as plt
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
def plot_loss_curves(history):
"""
Returns separate loss curves for training and validation metrics.
Args:
history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
"""
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs = range(len(history.history['loss']))
def compare_historys(original_history, new_history, initial_epochs=5):
"""
Compares two TensorFlow model History objects.
Args:
original_history: History object from original model (before new_history)
new_history: History object from continued model training (after original_history)
initial_epochs: Number of epochs in original_history (new_history plot starts from here)
"""
acc = original_history.history['accuracy']
loss = original_history.history['loss']
val_acc = original_history.history['val_accuracy']
val_loss = original_history.history['val_loss']
total_acc = acc + new_history.history['accuracy']
total_loss = loss + new_history.history['loss']
total_val_acc = val_acc + new_history.history['val_accuracy']
total_val_loss = val_loss + new_history.history['val_loss']
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
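# Data augmentation applied as a model layer: random horizontal flips, rotations, height/width resizing and zooms.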
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
compare_historys(original_history=history_all_classes_10_percent, new_history=history_all_classes_10_percent_fine_tune, initial_epochs=5) | code |
89122142/cell_18 | [
"text_plain_output_1.png"
] | !wget https://storage.googleapis.com/ztm_tf_course/food_vision/101_food_classes_10_percent.zip | code |
89122142/cell_28 | [
"text_plain_output_1.png"
] | import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False) | code |
89122142/cell_35 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback]) | code |
89122142/cell_43 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
for layer_number, layer in enumerate(model.layers[2].layers):
print(layer_number, layer.name, layer.trainable) | code |
89122142/cell_53 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results
base_model.trainable = True
for layer in base_model.layers[:-5]:
layer.trainable = False
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
history_all_classes_10_percent_fine_tune = model.fit(train_data_all_10_percent, epochs=10, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback], initial_epoch=history_all_classes_10_percent.epoch[-1])
model.save('101_food_classes_10_percent_fine_tuned')
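# Predict class probabilities for the whole test set; each row holds one probability per class and argmax gives the predicted class index.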
pred_probs = model.predict(test_data, verbose=1)
print(f'Number of test batches: {len(test_data)}')
print(f'Number of predictions: {len(pred_probs)}')
print(f'Number of probabilities per image: {len(pred_probs[0])}')
print(f'Predicted class number for the first image: {pred_probs[0].argmax()}')
89122142/cell_37 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
import datetime
import tensorflow as tf
def create_tensorboard_callback(dir_name, experiment_name):
"""
Creates a TensorBoard callback instance to store log files.
Stores log files with the filepath: "dir_name/experiment_name/current_datetime/"
Args:
dir_name: target directory to store TensorBoard log files
experiment_name: name of experiment directory (e.g. efficientnet_model_1)
"""
log_dir = dir_name + '/' + experiment_name + '/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
return tensorboard_callback
train_dir = '101_food_classes_10_percent/train/'
test_dir = '101_food_classes_10_percent/test/'
IMG_SIZE = (224, 224)
train_data_all_10_percent = tf.keras.preprocessing.image_dataset_from_directory(train_dir, label_mode='categorical', image_size=IMG_SIZE)
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir, label_mode='categorical', image_size=IMG_SIZE, shuffle=False)
checkpoint_path = '101_classes_10_percent_data_model_checkpoint'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, monitor='val_accuracy', save_best_only=True)
data_augmentation = Sequential([preprocessing.RandomFlip('horizontal'), preprocessing.RandomRotation(0.2), preprocessing.RandomHeight(0.2), preprocessing.RandomWidth(0.2), preprocessing.RandomZoom(0.2)], name='Data_Augmentation')
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
inputs = layers.Input(shape=(224, 224, 3), name='input_layer')
x = data_augmentation(inputs)
x = base_model(x, training=False)
x = layers.GlobalAveragePooling2D(name='global_avg_pooling')(x)
outputs = layers.Dense(len(train_data_all_10_percent.class_names), activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history_all_classes_10_percent = model.fit(train_data_all_10_percent, epochs=5, validation_data=test_data, validation_steps=int(0.15 * len(test_data)), callbacks=[checkpoint_callback])
eval_results = model.evaluate(test_data)
eval_results | code |
73097309/cell_9 | [
"image_output_1.png"
] | from skimage.transform import resize
import SimpleITK as sitk
import SimpleITK as sitk
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
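# Read a DICOM series with SimpleITK, crop to the middle 64 slices when the scan is deep enough, and resize the volume to 64x128x128.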
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
image = load(path='../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/01006/', kind='FLAIR')
image.shape | code |
73097309/cell_23 | [
"image_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from skimage.util import montage
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional as F
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
image = load(path='../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/01006/', kind='FLAIR')
image.shape
for i in range(64):
plt.axis('off')
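# Dataset wrapper: loads the FLAIR, T1w, T1wCE and T2w volumes for a case, scales each by its maximum intensity and stacks them into one 4-channel sample.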
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
dataloader = get_dataloader(dataset=BratsDataset, path_to_csv='test_data.csv', phase='train', fold=0)
len(dataloader)
data = next(iter(dataloader))
(data['Id'], data['image'].shape)
for i in range(64):
plt.axis('off')
class DoubleConv(nn.Module):
"""(Conv3D -> BN -> ReLU) * 2"""
def __init__(self, in_channels, out_channels, num_groups=8):
super().__init__()
self.double_conv = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True), nn.Conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True))
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.encoder = nn.Sequential(nn.MaxPool3d(2, 2), DoubleConv(in_channels, out_channels))
def forward(self, x):
return self.encoder(x)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, trilinear=False):
super().__init__()
if trilinear:
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
else:
self.up = nn.ConvTranspose3d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffZ = x2.size()[2] - x1.size()[2]
diffY = x2.size()[3] - x1.size()[3]
diffX = x2.size()[4] - x1.size()[4]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2, diffZ // 2, diffZ - diffZ // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class Out(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class UNet3d(nn.Module):
def __init__(self, in_channels, n_classes, n_channels):
super().__init__()
self.in_channels = in_channels
self.n_classes = n_classes
self.n_channels = n_channels
self.conv = DoubleConv(in_channels, n_channels)
self.enc1 = Down(n_channels, 2 * n_channels)
self.enc2 = Down(2 * n_channels, 4 * n_channels)
self.enc3 = Down(4 * n_channels, 8 * n_channels)
self.enc4 = Down(8 * n_channels, 8 * n_channels)
self.dec1 = Up(16 * n_channels, 4 * n_channels)
self.dec2 = Up(8 * n_channels, 2 * n_channels)
self.dec3 = Up(4 * n_channels, n_channels)
self.dec4 = Up(2 * n_channels, n_channels)
self.out = Out(n_channels, n_classes)
def forward(self, x):
x1 = self.conv(x)
x2 = self.enc1(x1)
x3 = self.enc2(x2)
x4 = self.enc3(x3)
x5 = self.enc4(x4)
mask = self.dec1(x5, x4)
mask = self.dec2(mask, x3)
mask = self.dec3(mask, x2)
mask = self.dec4(mask, x1)
mask = self.out(mask)
return mask
model = UNet3d(in_channels=4, n_classes=3, n_channels=24)
model.load_state_dict(torch.load('../input/seg-model/best_model_0.17.pth'))
model.eval()
class ShowResult:
def mask_preprocessing(self, mask):
"""
Split the predicted mask into its WT, TC and ET channels and build a 2D montage of each for plotting.
"""
mask = mask.squeeze().cpu().detach().numpy()
mask = np.moveaxis(mask, (0, 1, 2, 3), (0, 3, 2, 1))
mask_WT = np.rot90(montage(mask[0]))
mask_TC = np.rot90(montage(mask[1]))
mask_ET = np.rot90(montage(mask[2]))
return mask_WT, mask_TC, mask_ET
def image_preprocessing(self, image):
"""
Returns the FLAIR image, used as the background when overlaying ground-truth and predicted masks.
"""
image = image.squeeze().cpu().detach().numpy()
image = np.moveaxis(image, (0, 1, 2, 3), (0, 3, 2, 1))
flair_img = np.rot90(montage(image[0]))
return flair_img
def plot(self, image, prediction):
image = self.image_preprocessing(image)
# gt_mask_WT, gt_mask_TC, gt_mask_ET = self.mask_preprocessing(ground_truth)
pr_mask_WT, pr_mask_TC, pr_mask_ET = self.mask_preprocessing(prediction)
fig, axes = plt.subplots(1, 2, figsize = (35, 30))
[ax.axis("off") for ax in axes]
# axes[0].set_title("Ground Truth", fontsize=35, weight='bold')
# axes[0].imshow(image, cmap ='bone')
# axes[0].imshow(np.ma.masked_where(gt_mask_WT == False, gt_mask_WT),
# cmap='cool_r', alpha=0.6)
# axes[0].imshow(np.ma.masked_where(gt_mask_TC == False, gt_mask_TC),
# cmap='autumn_r', alpha=0.6)
# axes[0].imshow(np.ma.masked_where(gt_mask_ET == False, gt_mask_ET),
# cmap='autumn', alpha=0.6)
axes[1].set_title("Prediction", fontsize=35, weight='bold')
axes[1].imshow(image, cmap ='bone')
axes[1].imshow(np.ma.masked_where(pr_mask_WT == False, pr_mask_WT),
cmap='cool_r', alpha=0.6)
axes[1].imshow(np.ma.masked_where(pr_mask_TC == False, pr_mask_TC),
cmap='autumn_r', alpha=0.6)
axes[1].imshow(np.ma.masked_where(pr_mask_ET == False, pr_mask_ET),
cmap='autumn', alpha=0.6)
plt.tight_layout()
plt.show()
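# Inference over the test dataloader: apply a sigmoid to the logits, binarize at a 0.3 threshold and collect the predicted 3-channel masks together with their case IDs.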
train_data = []
ID = []
pos = []
for itr, data in enumerate(dataloader):
print(itr)
treshold = 0.3
id_ = int(data['Id'][0])
id_ = '{0:05d}'.format(id_)
print(id_)
img = data['image']
logits = model(img)
probs = torch.sigmoid(logits)
show = (probs >= treshold).float()
predictions = show.numpy()
predictions = predictions.reshape(3, 128, 128, 64)
predictions = np.moveaxis(predictions, (0, 1, 2, 3), (0, 3, 2, 1))
train_data.append(predictions)
ID.append(id_)
pos.append(itr) | code |
73097309/cell_2 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import os
import time
from random import randint
from keras.callbacks import CSVLogger
import numpy as np
from scipy import stats
import pandas as pd
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.model_selection import KFold
import nibabel as nib
import pydicom as pdm
import nilearn as nl
import nilearn.plotting as nlplt
import h5py
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.animation as anim
import matplotlib.patches as mpatches
import matplotlib.gridspec as gridspec
import seaborn as sns
import imageio
from skimage.transform import resize
from skimage.util import montage
from IPython.display import Image as show_gif
from IPython.display import clear_output
from IPython.display import YouTubeVideo
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import MSELoss
import albumentations as A
from albumentations import Compose, HorizontalFlip
import SimpleITK as sitk
import sys
import os
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import os
import pydicom
import matplotlib.pyplot as plt
from pydicom.pixel_data_handlers.util import apply_voi_lut
import glob
import cv2
import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch import optim
import keras
import warnings
import os
import zipfile
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
warnings.filterwarnings('ignore')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.seterr(divide='ignore', invalid='ignore')
from keras.layers import Conv3D, MaxPool3D, Flatten, Dense
from keras.layers import Dropout, Input, BatchNormalization
from sklearn.metrics import confusion_matrix, accuracy_score
from plotly.offline import iplot, init_notebook_mode
from keras.losses import categorical_crossentropy
from keras.optimizers import Adadelta
import plotly.graph_objs as go
from matplotlib.pyplot import cm
from keras.models import Model
import numpy as np
import keras
import h5py
import SimpleITK as sitk
init_notebook_mode(connected=True) | code |
73097309/cell_19 | [
"text_html_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional as F
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
class DoubleConv(nn.Module):
"""(Conv3D -> BN -> ReLU) * 2"""
def __init__(self, in_channels, out_channels, num_groups=8):
super().__init__()
self.double_conv = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True), nn.Conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True))
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.encoder = nn.Sequential(nn.MaxPool3d(2, 2), DoubleConv(in_channels, out_channels))
def forward(self, x):
return self.encoder(x)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, trilinear=False):
super().__init__()
if trilinear:
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
else:
self.up = nn.ConvTranspose3d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffZ = x2.size()[2] - x1.size()[2]
diffY = x2.size()[3] - x1.size()[3]
diffX = x2.size()[4] - x1.size()[4]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2, diffZ // 2, diffZ - diffZ // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class Out(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
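# 3D U-Net: DoubleConv plus four Down blocks form the encoder, four Up blocks with skip connections form the decoder, and a 1x1x1 conv produces the output; instantiated below with 4 input modalities and 3 mask channels.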
class UNet3d(nn.Module):
def __init__(self, in_channels, n_classes, n_channels):
super().__init__()
self.in_channels = in_channels
self.n_classes = n_classes
self.n_channels = n_channels
self.conv = DoubleConv(in_channels, n_channels)
self.enc1 = Down(n_channels, 2 * n_channels)
self.enc2 = Down(2 * n_channels, 4 * n_channels)
self.enc3 = Down(4 * n_channels, 8 * n_channels)
self.enc4 = Down(8 * n_channels, 8 * n_channels)
self.dec1 = Up(16 * n_channels, 4 * n_channels)
self.dec2 = Up(8 * n_channels, 2 * n_channels)
self.dec3 = Up(4 * n_channels, n_channels)
self.dec4 = Up(2 * n_channels, n_channels)
self.out = Out(n_channels, n_classes)
def forward(self, x):
x1 = self.conv(x)
x2 = self.enc1(x1)
x3 = self.enc2(x2)
x4 = self.enc3(x3)
x5 = self.enc4(x4)
mask = self.dec1(x5, x4)
mask = self.dec2(mask, x3)
mask = self.dec3(mask, x2)
mask = self.dec4(mask, x1)
mask = self.out(mask)
return mask
model = UNet3d(in_channels=4, n_classes=3, n_channels=24)
model.load_state_dict(torch.load('../input/seg-model/best_model_0.17.pth')) | code |
73097309/cell_15 | [
"text_plain_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
dataloader = get_dataloader(dataset=BratsDataset, path_to_csv='test_data.csv', phase='train', fold=0)
len(dataloader)
data = next(iter(dataloader))
(data['Id'], data['image'].shape) | code |
73097309/cell_16 | [
"image_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
image = load(path='../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/01006/', kind='FLAIR')
image.shape
for i in range(64):
plt.axis('off')
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
dataloader = get_dataloader(dataset=BratsDataset, path_to_csv='test_data.csv', phase='train', fold=0)
len(dataloader)
data = next(iter(dataloader))
(data['Id'], data['image'].shape)
plt.figure(figsize=(12, 12))
for i in range(64):
plt.subplot(8, 8, i + 1)
plt.imshow(data['image'][0][0][i])
plt.axis('off') | code |
73097309/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels.head() | code |
73097309/cell_24 | [
"text_plain_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from skimage.util import montage
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional as F
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
image = load(path='../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/01006/', kind='FLAIR')
image.shape
for i in range(64):
plt.axis('off')
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
dataloader = get_dataloader(dataset=BratsDataset, path_to_csv='test_data.csv', phase='train', fold=0)
len(dataloader)
data = next(iter(dataloader))
(data['Id'], data['image'].shape)
for i in range(64):
plt.axis('off')
class DoubleConv(nn.Module):
"""(Conv3D -> BN -> ReLU) * 2"""
def __init__(self, in_channels, out_channels, num_groups=8):
super().__init__()
self.double_conv = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True), nn.Conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1), nn.GroupNorm(num_groups=num_groups, num_channels=out_channels), nn.ReLU(inplace=True))
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.encoder = nn.Sequential(nn.MaxPool3d(2, 2), DoubleConv(in_channels, out_channels))
def forward(self, x):
return self.encoder(x)
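# Decoder block: upsample (transposed conv or trilinear), pad to match the skip connection's spatial size, concatenate along channels, then apply DoubleConv.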
class Up(nn.Module):
def __init__(self, in_channels, out_channels, trilinear=False):
super().__init__()
if trilinear:
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
else:
self.up = nn.ConvTranspose3d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffZ = x2.size()[2] - x1.size()[2]
diffY = x2.size()[3] - x1.size()[3]
diffX = x2.size()[4] - x1.size()[4]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2, diffZ // 2, diffZ - diffZ // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class Out(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class UNet3d(nn.Module):
def __init__(self, in_channels, n_classes, n_channels):
super().__init__()
self.in_channels = in_channels
self.n_classes = n_classes
self.n_channels = n_channels
self.conv = DoubleConv(in_channels, n_channels)
self.enc1 = Down(n_channels, 2 * n_channels)
self.enc2 = Down(2 * n_channels, 4 * n_channels)
self.enc3 = Down(4 * n_channels, 8 * n_channels)
self.enc4 = Down(8 * n_channels, 8 * n_channels)
self.dec1 = Up(16 * n_channels, 4 * n_channels)
self.dec2 = Up(8 * n_channels, 2 * n_channels)
self.dec3 = Up(4 * n_channels, n_channels)
self.dec4 = Up(2 * n_channels, n_channels)
self.out = Out(n_channels, n_classes)
def forward(self, x):
x1 = self.conv(x)
x2 = self.enc1(x1)
x3 = self.enc2(x2)
x4 = self.enc3(x3)
x5 = self.enc4(x4)
mask = self.dec1(x5, x4)
mask = self.dec2(mask, x3)
mask = self.dec3(mask, x2)
mask = self.dec4(mask, x1)
mask = self.out(mask)
return mask
model = UNet3d(in_channels=4, n_classes=3, n_channels=24)
model.load_state_dict(torch.load('../input/seg-model/best_model_0.17.pth'))
model.eval()
class ShowResult:
def mask_preprocessing(self, mask):
"""
Split the predicted mask into its WT, TC and ET channels and build a 2D montage of each for plotting.
"""
mask = mask.squeeze().cpu().detach().numpy()
mask = np.moveaxis(mask, (0, 1, 2, 3), (0, 3, 2, 1))
mask_WT = np.rot90(montage(mask[0]))
mask_TC = np.rot90(montage(mask[1]))
mask_ET = np.rot90(montage(mask[2]))
return mask_WT, mask_TC, mask_ET
def image_preprocessing(self, image):
"""
Returns the FLAIR image, used as the background when overlaying ground-truth and predicted masks.
"""
image = image.squeeze().cpu().detach().numpy()
image = np.moveaxis(image, (0, 1, 2, 3), (0, 3, 2, 1))
flair_img = np.rot90(montage(image[0]))
return flair_img
def plot(self, image, prediction):
image = self.image_preprocessing(image)
# gt_mask_WT, gt_mask_TC, gt_mask_ET = self.mask_preprocessing(ground_truth)
pr_mask_WT, pr_mask_TC, pr_mask_ET = self.mask_preprocessing(prediction)
fig, axes = plt.subplots(1, 2, figsize = (35, 30))
[ax.axis("off") for ax in axes]
# axes[0].set_title("Ground Truth", fontsize=35, weight='bold')
# axes[0].imshow(image, cmap ='bone')
# axes[0].imshow(np.ma.masked_where(gt_mask_WT == False, gt_mask_WT),
# cmap='cool_r', alpha=0.6)
# axes[0].imshow(np.ma.masked_where(gt_mask_TC == False, gt_mask_TC),
# cmap='autumn_r', alpha=0.6)
# axes[0].imshow(np.ma.masked_where(gt_mask_ET == False, gt_mask_ET),
# cmap='autumn', alpha=0.6)
axes[1].set_title("Prediction", fontsize=35, weight='bold')
axes[1].imshow(image, cmap ='bone')
axes[1].imshow(np.ma.masked_where(pr_mask_WT == False, pr_mask_WT),
cmap='cool_r', alpha=0.6)
axes[1].imshow(np.ma.masked_where(pr_mask_TC == False, pr_mask_TC),
cmap='autumn_r', alpha=0.6)
axes[1].imshow(np.ma.masked_where(pr_mask_ET == False, pr_mask_ET),
cmap='autumn', alpha=0.6)
plt.tight_layout()
plt.show()
train_data = []
ID = []
pos = []
for itr, data in enumerate(dataloader):
treshold = 0.3
id_ = int(data['Id'][0])
id_ = '{0:05d}'.format(id_)
img = data['image']
logits = model(img)
probs = torch.sigmoid(logits)
show = (probs >= treshold).float()
predictions = show.numpy()
predictions = predictions.reshape(3, 128, 128, 64)
predictions = np.moveaxis(predictions, (0, 1, 2, 3), (0, 3, 2, 1))
train_data.append(predictions)
ID.append(id_)
pos.append(itr)
train_data[0].shape | code |
73097309/cell_14 | [
"text_html_output_1.png"
] | from albumentations import Compose, HorizontalFlip
from skimage.transform import resize
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
import SimpleITK as sitk
import SimpleITK as sitk
import numpy as np
import numpy as np
import numpy as np
import numpy as np
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
class BratsDataset(Dataset):
def __init__(self, df: pd.DataFrame, phase: str='test', is_resize: bool=False):
self.df = df
self.phase = phase
self.augmentations = get_augmentations(phase)
self.data_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
self.is_resize = is_resize
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
id_ = self.df['imfolder'][idx]
path = self.df['path'][idx]
id_ = str(id_)
images = []
for data_type in self.data_types:
img_path = path
img = self.load_img(img_path, data_type)
img = img.reshape(64, 128, 128)
img = img.numpy()
img = self.normalize(img)
images.append(img)
img = np.stack(images)
img = np.moveaxis(img, (0, 1, 2, 3), (0, 3, 2, 1))
if self.phase != 'test':
augmented = self.augmentations(image=img.astype(np.float32))
img = augmented['image']
return {'Id': id_, 'image': img}
return {'Id': id_, 'image': img}
def load_img(self, file_path, data_type):
data = load(file_path, data_type)
return data
def normalize(self, data: np.ndarray):
data_min = 0
return (data - data_min) / (np.amax(data) - data_min)
def get_augmentations(phase):
list_transforms = []
list_trfms = Compose(list_transforms)
return list_trfms
def get_dataloader(dataset: torch.utils.data.Dataset, path_to_csv: str, phase: str, fold: int=0, batch_size: int=1, num_workers: int=0):
"""Returns: dataloader for the model training"""
df = pd.read_csv(path_to_csv)
dataset = dataset(df, phase)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=False, shuffle=True)
return dataloader
test_data = pd.read_csv('test_data.csv')
test_data | code |
73097309/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from skimage.transform import resize
import SimpleITK as sitk
import SimpleITK as sitk
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import os
import os
import os
import pandas as pd
import pandas as pd
import torch
import torch
import torch
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
data_len = 1000
labels['imfolder'] = ['{0:05d}'.format(s) for s in labels['BraTS21ID']]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
labels['path'] = [os.path.join(path, f) for f in labels['imfolder']]
train = labels[:data_len]
val_len = int(data_len * 0.2)
train = labels[:0]
path = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/'
p = []
d = []
for i in range(1010):
id_ = '{0:05d}'.format(i)
if os.path.exists(path + id_):
p.append(path + id_)
d.append(id_)
def load(path, kind, image_size=128, depth=64):
directory = path + '/' + kind
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(directory)
reader.SetFileNames(dicom_names)
image = reader.Execute()
image = sitk.GetArrayFromImage(image)
mid = int(image.shape[0] / 2)
if image.shape[0] >= 64:
image = image[mid - 32:mid + 32, :, :]
image = resize(image, (64, 128, 128), preserve_range=True)
image = torch.tensor(image)
return image
image = load(path='../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/01006/', kind='FLAIR')
image.shape
plt.figure(figsize=(12, 12))
for i in range(64):
plt.subplot(8, 8, i + 1)
plt.imshow(image[i])
plt.axis('off') | code |
73066496/cell_21 | [
"image_output_1.png"
] | from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique()
df_train = df_train.drop(['id'], axis=1)
df_train = shuffle(df_train)
target = ['target']
var_categorical = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
var_numerical = list(set(df_train.columns) - set(var_categorical) - set(target))
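# Annotates each bar with its height and its share of the summed bar heights, placing the label above the bar (or below it for negative values).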
def label_values(ax, spacing=5):
total = 0
for rect in ax.patches:
total += rect.get_height()
for rect in ax.patches:
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
space = spacing
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = '{:.2f}, {:.2f}'.format(y_value, y_value / total * 100)
ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords='offset points', ha='center', va=va)
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.countplot(x = df_train[column])
label_values(ax)
plt.show()
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.boxplot(x = df_train[column], y = df_train['target'])
label_values(ax)
plt.show()
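# For each numerical feature: a box plot, a distribution plot and a scatter plot against the target, three subplots per feature.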
i = 1
for column in var_numerical:
print(column.title())
plt.subplots(figsize=(16, 50))
plt.subplot(len(var_numerical) + 1, 3, i)
sns.boxplot(y=df_train[column])
i += 1
plt.subplot(len(var_numerical) + 1, 3, i)
sns.distplot(x=df_train[column])
i += 1
plt.subplot(len(var_numerical) + 1, 3, i)
sns.scatterplot(y=df_train['target'], x=df_train[column])
i += 1
plt.show() | code |
73066496/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_test.nunique() | code |
73066496/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.info() | code |
73066496/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique()
df_train = df_train.drop(['id'], axis=1)
df_train = shuffle(df_train)
target = ['target']
var_categorical = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
var_numerical = list(set(df_train.columns) - set(var_categorical) - set(target))
def label_values(ax, spacing=5):
total = 0
for rect in ax.patches:
total += rect.get_height()
for rect in ax.patches:
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
space = spacing
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = '{:.2f}, {:.2f}'.format(y_value, y_value / total * 100)
ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords='offset points', ha='center', va=va)
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.countplot(x = df_train[column])
label_values(ax)
plt.show()
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.boxplot(x=df_train[column], y=df_train['target'])
label_values(ax)
plt.show() | code |
73066496/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique() | code |
73066496/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique()
df_train = df_train.drop(['id'], axis=1)
df_train = shuffle(df_train)
target = ['target']
var_categorical = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
var_numerical = list(set(df_train.columns) - set(var_categorical) - set(target))
def label_values(ax, spacing=5):
total = 0
for rect in ax.patches:
total += rect.get_height()
for rect in ax.patches:
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
space = spacing
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = '{:.2f}, {:.2f}'.format(y_value, y_value / total * 100)
ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords='offset points', ha='center', va=va)
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.countplot(x=df_train[column])
label_values(ax)
plt.show() | code |
73066496/cell_7 | [
"image_output_11.png",
"application_vnd.jupyter.stderr_output_24.png",
"application_vnd.jupyter.stderr_output_16.png",
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_15.png",
"application_vnd.jupyter.stderr_output_18.png",
"text_plain_output_9.png",
"image_output_14.png",
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_26.png",
"application_vnd.jupyter.stderr_output_6.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"application_vnd.jupyter.stderr_output_12.png",
"application_vnd.jupyter.stderr_output_8.png",
"text_plain_output_27.png",
"image_output_7.png",
"application_vnd.jupyter.stderr_output_10.png",
"text_plain_output_21.png",
"text_plain_output_25.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"application_vnd.jupyter.stderr_output_28.png",
"image_output_6.png",
"application_vnd.jupyter.stderr_output_20.png",
"text_plain_output_23.png",
"application_vnd.jupyter.stderr_output_22.png",
"image_output_12.png",
"text_plain_output_1.png",
"image_output_3.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"application_vnd.jupyter.stderr_output_14.png",
"image_output_9.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.head() | code |
73066496/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique()
df_train = df_train.drop(['id'], axis=1)
df_train = shuffle(df_train)
target = ['target']
var_categorical = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
var_numerical = list(set(df_train.columns) - set(var_categorical) - set(target))
sns.boxplot(x=df_train['target'])
plt.show() | code |
73066496/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.describe() | code |
73066496/cell_22 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_8.png",
"image_output_6.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_train.nunique()
df_train = df_train.drop(['id'], axis=1)
df_train = shuffle(df_train)
target = ['target']
var_categorical = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
var_numerical = list(set(df_train.columns) - set(var_categorical) - set(target))
def label_values(ax, spacing=5):
total = 0
for rect in ax.patches:
total += rect.get_height()
for rect in ax.patches:
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
space = spacing
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = '{:.2f}, {:.2f}'.format(y_value, y_value / total * 100)
ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords='offset points', ha='center', va=va)
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.countplot(x=df_train[column])
label_values(ax)
plt.show()
for column in var_categorical:
plt.figure(figsize=(15, 6))
print(column.title())
ax = sns.boxplot(x=df_train[column], y=df_train['target'])
label_values(ax)
plt.show()
i = 1
for column in var_numerical:
i += 1
i += 1
i += 1
plt.figure(figsize=(15, 15))
sns.heatmap(df_train.corr(), annot=True)
plt.show() | code |
73066496/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
df_test.info() | code |
72062718/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
(test.shape, train.shape)
train.drop(['id'], axis=1, inplace=True)
test.drop(['id'], axis=1, inplace=True)
(test.shape, train.shape)
test.fillna(0, inplace=True)
train.fillna(0, inplace=True)
corr = train.corr()
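# Columns whose correlation with 'loss' is essentially zero (|corr| < 0.001) become deletion candidates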
columns_to_delete = corr[(corr.loss < 0.001) & (corr.loss > -0.001)].index | code
72062718/cell_34 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearnex import patch_sklearn
from sklearnex import patch_sklearn
patch_sklearn() | code |
72062718/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
test.head() | code |
72062718/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
(test.shape, train.shape)
train.drop(['id'], axis=1, inplace=True)
test.drop(['id'], axis=1, inplace=True)
(test.shape, train.shape) | code |
72062718/cell_32 | [
"text_plain_output_1.png"
] | !pip install scikit-learn-intelex -q --progress-bar off | code |
72062718/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
(test.shape, train.shape)
train.drop(['id'], axis=1, inplace=True)
test.drop(['id'], axis=1, inplace=True)
all_data = [train, test]
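# Hand-crafted interaction features (ratios, squared ratios, products, log2 transforms) applied to both train and test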
for df in all_data:
df['f77^2/f52^2'] = df['f77'] ** 2 / df['f52'] ** 2
df['f74^2/f81^2'] = df['f74'] ** 2 / df['f81'] ** 2
df['f77/f69'] = df['f77'] / df['f69']
df['f81^2/f77^2'] = df['f81'] ** 2 / df['f77'] ** 2
df['f96/f28'] = df['f96'] / df['f28']
df['f96^2/f73^2'] = df['f96'] ** 2 / df['f73'] ** 2
df['f78/f28'] = df['f78'] / df['f28']
df['f73/f28'] = df['f73'] / df['f28']
df['f66/f69'] = df['f66'] / df['f69']
df['f46^2/f4^2'] = df['f46'] ** 2 / df['f4'] ** 2
df['f4/f75'] = df['f4'] / df['f75']
df['f69^2/f96^2'] = df['f69'] ** 2 / df['f96'] ** 2
df['f25/f69'] = df['f25'] / df['f69']
df['f78/f69'] = df['f78'] / df['f69']
df['f96^2/f77^2'] = df['f96'] ** 2 / df['f77'] ** 2
df['f4^2/f52^2'] = df['f4'] ** 2 / df['f52'] ** 2
df['f66^2/f52^2'] = df['f66'] ** 2 / df['f52'] ** 2
df['f4^2/f81^2'] = df['f4'] ** 2 / df['f81'] ** 2
df['f46^2/f81^2'] = df['f46'] ** 2 / df['f81'] ** 2
df['f47/f69'] = df['f47'] / df['f69']
df['f74xf70'] = df['f74'] * df['f70']
df['f46^2/f66^2'] = df['f46'] ** 2 / df['f66'] ** 2
df['f74/f47'] = df['f74'] / df['f47']
df['f96^2xf69^2'] = df['f96'] ** 2 / df['f69'] ** 2
df['f66/f46'] = df['f66'] / df['f46']
df['f25xf96'] = df['f25'] * df['f96']
df['f28xf81'] = df['f28'] * df['f81']
df['f52xf66'] = df['f52'] * df['f66']
df['f46^2xf81^2'] = df['f46'] ** 2 * df['f81'] ** 2
df['f46xf74'] = df['f46'] * df['f74']
df['f28_log'] = np.log2(df['f28'])
df['f28xf70'] = df['f28'] * df['f70']
df['f52_log'] = np.log2(df['f52'])
df['f47_log'] = np.log2(df['f47'])
df['f66xf73'] = df['f66'] * df['f73']
df['f69_log'] = np.log2(df['f69'])
df['f96/f78'] = df['f96'] / df['f78'] | code |
72062718/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
train.head() | code |
72062718/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_sub = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
pseudo = pd.read_csv('../input/blending-tool-tps-aug-2021/file1_7.85192_file2_7.85244_blend.csv')
(test.shape, train.shape) | code |
128020426/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
df_train.describe() | code |
128020426/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
df_train.info() | code |
128020426/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
plt.figure(figsize=(16, 5))
sns.heatmap(df_train.corr(), cmap='crest', annot=True, fmt='.3f')
plt.show() | code |
128020426/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
128020426/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
pp = sns.pairplot(data=df_train, y_vars=['yield'], x_vars=['fruitset', 'fruitmass', 'seeds']) | code |
128020426/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
X_train_full = df_train.drop(['yield'], axis=1)
y_train_full = df_train['yield']
X_test = df_test
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, test_size=0.2)
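# Standardize the features, then fit a RandomForestRegressor; the Pipeline keeps both steps together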
pipeline = Pipeline([('scaler', StandardScaler()), ('random_forest_regr', RandomForestRegressor())])
pipeline.fit(X_train.values, y_train) | code |
128020426/cell_16 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
X_train_full = df_train.drop(['yield'], axis=1)
y_train_full = df_train['yield']
X_test = df_test
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, test_size=0.2)
pipeline = Pipeline([('scaler', StandardScaler()), ('random_forest_regr', RandomForestRegressor())])
pipeline.fit(X_train.values, y_train)
print('MAE of train:', mean_absolute_error(pipeline.predict(X_train.values), y_train))
print('MAE of valid:', mean_absolute_error(pipeline.predict(X_valid.values), y_valid)) | code |
128020426/cell_3 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
df_train.head() | code |
128020426/cell_14 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
X_train_full = df_train.drop(['yield'], axis=1)
y_train_full = df_train['yield']
X_test = df_test
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, test_size=0.2)
print('Train: X shape={} y shape={}'.format(X_train.shape, y_train.shape))
print('Valid: X shape={} y shape={}'.format(X_valid.shape, y_valid.shape)) | code |
122247764/cell_57 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
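# Hyperparameter grid for the random forest; GridSearchCV below evaluates every combination with 5-fold CV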
param_grid = {'bootstrap': [False, True], 'max_depth': [5, 8, 10, 20], 'max_features': [3, 4, 5, None], 'min_samples_split': [2, 10, 12], 'n_estimators': [100, 200, 300]}
rfc = RandomForestClassifier()
clf = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(bootstrap=False, max_depth=10, max_features=3, min_samples_split=12, n_estimators=100, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
train_predict = clf.predict(X_train)
test_predict = clf.predict(X_test)
print('Accuracy on testing data: ', metrics.accuracy_score(y_test, test_predict))
print('Precision on testing data:', metrics.precision_score(y_test, test_predict))
print('Recall on testing data: ', metrics.recall_score(y_test, test_predict)) | code |
122247764/cell_56 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
param_grid = {'bootstrap': [False, True], 'max_depth': [5, 8, 10, 20], 'max_features': [3, 4, 5, None], 'min_samples_split': [2, 10, 12], 'n_estimators': [100, 200, 300]}
rfc = RandomForestClassifier()
clf = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(bootstrap=False, max_depth=10, max_features=3, min_samples_split=12, n_estimators=100, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
train_predict = clf.predict(X_train)
print('Precision on training data:', metrics.precision_score(y_train, train_predict))
print('Recall on training data:', metrics.recall_score(y_train, train_predict)) | code |
122247764/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
student = pd.read_csv('/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv')
student.shape
student.columns
student.sample(4)
student.drop(student.index[student['Target'] == 'Enrolled'], inplace=True)
student.dtypes | code |
122247764/cell_55 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
param_grid = {'bootstrap': [False, True], 'max_depth': [5, 8, 10, 20], 'max_features': [3, 4, 5, None], 'min_samples_split': [2, 10, 12], 'n_estimators': [100, 200, 300]}
rfc = RandomForestClassifier()
clf = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(bootstrap=False, max_depth=10, max_features=3, min_samples_split=12, n_estimators=100, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Without CV: ', accuracy_score(y_test, y_pred))
scores = cross_val_score(clf, X_train, y_train, cv=10)
print('With CV: ', scores.mean())
print('Precision Score: ', precision_score(y_test, y_pred, average='micro'))
print('Recall Score: ', recall_score(y_test, y_pred, average='micro'))
print('F1 Score: ', f1_score(y_test, y_pred, average='micro')) | code |
122247764/cell_54 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
param_grid = {'bootstrap': [False, True], 'max_depth': [5, 8, 10, 20], 'max_features': [3, 4, 5, None], 'min_samples_split': [2, 10, 12], 'n_estimators': [100, 200, 300]}
rfc = RandomForestClassifier()
clf = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Accuracy: ', accuracy_score(y_test, y_pred))
print(clf.best_params_)
print(clf.best_estimator_) | code |
122247764/cell_52 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Without Scaling and without CV: ', accuracy_score(y_test, y_pred))
scores = cross_val_score(clf, X_train, y_train, cv=10)
print('Without Scaling and With CV: ', scores.mean()) | code |
122247764/cell_49 | [
"text_plain_output_1.png"
] | print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape) | code |
122247764/cell_3 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
import pickle
import warnings
warnings.filterwarnings('ignore') | code |
122247764/cell_46 | [
"text_html_output_1.png"
] | import pandas as pd
student = pd.read_csv('/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv')
student.shape
student.columns
student.sample(4)
student.drop(student.index[student['Target'] == 'Enrolled'], inplace=True)
student.dtypes
student.corr()['Target']
student_df = student.iloc[:, [1, 11, 13, 14, 15, 16, 17, 20, 22, 23, 26, 28, 29, 34]]
student_df.corr()['Target']
X = student_df.iloc[:, 0:13]
y = student_df.iloc[:, -1]
X | code |
122247764/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
student = pd.read_csv('/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv')
student.shape
student.columns
student.sample(4)
student.drop(student.index[student['Target'] == 'Enrolled'], inplace=True)
student.dtypes
student.describe() | code |
122247764/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
student = pd.read_csv('/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv')
student.shape
student.columns
student.sample(4)
student.drop(student.index[student['Target'] == 'Enrolled'], inplace=True)
student | code |
122247764/cell_53 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import cross_val_score
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores = cross_val_score(clf, X_train, y_train, cv=10)
clf = RandomForestClassifier(max_depth=10, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Without CV: ', accuracy_score(y_test, y_pred))
scores = cross_val_score(clf, X_train, y_train, cv=10)
print('With CV: ', scores.mean())
print('Precision Score: ', precision_score(y_test, y_pred, average='macro'))
print('Recall Score: ', recall_score(y_test, y_pred, average='macro'))
print('F1 Score: ', f1_score(y_test, y_pred, average='macro')) | code |
129012199/cell_21 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
# north_american_median = df['NA_Sales'].median()
# print(north_american_median)
# north_american_median_index = df.index[df.NA_Sales == north_american_median][0]
# print(df.iloc[north_american_median_index-5:north_american_median_index+5]['Name'])
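# Rank every game by its distance from the NA_Sales median, then keep the 5 closest above and the 5 closest below it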
median_value = df["NA_Sales"].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values("NA_Sales", ascending=False)
print(median_value)
new_df
top_seller = df.head(1)
top_seller
platform_avgs = df.groupby(by='Platform')['Global_Sales'].mean().sort_values(ascending=False)
platform_avgs
df.tail(1) | code |
129012199/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
median_value = df['NA_Sales'].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values('NA_Sales', ascending=False)
print(median_value)
new_df | code |
129012199/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre | code |
129012199/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
# north_american_median = df['NA_Sales'].median()
# print(north_american_median)
# north_american_median_index = df.index[df.NA_Sales == north_american_median][0]
# print(df.iloc[north_american_median_index-5:north_american_median_index+5]['Name'])
median_value = df["NA_Sales"].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values("NA_Sales", ascending=False)
print(median_value)
new_df
top_seller = df.head(1)
top_seller
platform_avgs = df.groupby(by='Platform')['Global_Sales'].mean().sort_values(ascending=False)
platform_avgs
yearly_sales_by_pub = df.groupby(['Year', 'Publisher'])['Global_Sales'].sum()
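# Sum global sales per (Year, Publisher); idxmax within each year picks that year's top-selling publisher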
idx = yearly_sales_by_pub.groupby('Year').idxmax()
yearly_sales_by_pub.loc[idx] | code |
129012199/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
top_games.head(20) | code |