path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
130015002/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
test_df.shape
test_df['Total Rooms'] = test_df['AveRooms'].apply(lambda x: int(x))
test_df = test_df.drop(['AveRooms'], axis=1)
test_df['HouseAge'] = test_df['HouseAge'].apply(lambda x: int(x))
test_df['Bed Rooms'] = test_df['AveBedrms'].apply(lambda x: int(x))
test_df = test_df.drop(['AveBedrms'], axis=1)
test_df['AveOccup'] = test_df['AveOccup'].apply(lambda x: int(x))
test_df.head(2) | code |
130015002/cell_29 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
test_df.shape | code |
130015002/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train)
pred = reg.predict(X_val)
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_val, pred)
print('mae : ', mae) | code |
130015002/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df = train_df.drop(['AveRooms'], axis=1)
train_df = train_df.drop(['AveBedrms'], axis=1)
plt.figure(figsize=(12, 10))
sns.heatmap(train_df.corr(), annot=True, cmap='Greens') | code |
130015002/cell_41 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
test_df.shape
test_df['Total Rooms'] = test_df['AveRooms'].apply(lambda x: int(x))
test_df = test_df.drop(['AveRooms'], axis=1)
test_df['HouseAge'] = test_df['HouseAge'].apply(lambda x: int(x))
test_df['Bed Rooms'] = test_df['AveBedrms'].apply(lambda x: int(x))
test_df = test_df.drop(['AveBedrms'], axis=1)
test_df['AveOccup'] = test_df['AveOccup'].apply(lambda x: int(x))
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train)
pred = reg.predict(X_val)
prediction = reg.predict(test_df)
submission = pd.DataFrame({'id': test_df.id, 'MedHouseVal': prediction})
submission.head() | code |
130015002/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum() | code |
130015002/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.head() | code |
130015002/cell_28 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
test_df.head() | code |
130015002/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape | code |
130015002/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df = train_df.drop(['AveRooms'], axis=1)
train_df = train_df.drop(['AveBedrms'], axis=1)
plt.figure(figsize=(12, 6))
sns.pairplot(train_df, x_vars=['Total Rooms'], y_vars=['MedHouseVal'], size=7, kind='scatter', hue='AveOccup', palette='Greens_r')
plt.xlabel('Total Rooms')
plt.ylabel('House Value') | code |
130015002/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df.describe() | code |
130015002/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df = train_df.drop(['AveRooms'], axis=1)
train_df = train_df.drop(['AveBedrms'], axis=1)
train_df.head(2) | code |
130015002/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train) | code |
50227915/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
questions | code |
50227915/cell_2 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
print(data.shape)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
data.head() | code |
50227915/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
gender = data[data.columns[2]].value_counts()
man = gender['Man']
woman = gender['Woman']
diff_p = (man - woman) / woman * 100
Male = data[data[questions[2]] == 'Man']
Female = data[data[questions[2]] == 'Woman']
fig, ax = plt.subplots()
m_age_groups = Male[Male.columns[1]].value_counts().sort_index()
sns.barplot(m_age_groups,m_age_groups.index,color="cyan")
f_age_groups = Female[Female.columns[1]].value_counts().sort_index()
sns.barplot(-1 * f_age_groups,f_age_groups.index,color="salmon")
ticks = ax.get_xticks()
plt.tight_layout()
ax.set_xticklabels([int(abs(tick)) for tick in ticks])
red_patch = mpatches.Patch(color='salmon', label='Female')
black_patch = mpatches.Patch(color='cyan', label='Male')
plt.legend(handles=[red_patch, black_patch])
plt.show()
plt.rcParams['figure.figsize'] = (10, 10)
country = data[data.columns[3]].value_counts()
perce = country['India'] / country.sum() * 100
ITEM = data[data.columns[4]].value_counts()
perc = (ITEM.iloc[0] + ITEM.iloc[0]) / ITEM.sum() * 100
plt.rcParams['figure.figsize'] = (10, 6)
prog_lang = data.filter(regex='What programming languages do you use on a regular basis?')
desc = prog_lang.describe()
prog_count = desc.iloc[0].values
prog_names = desc.iloc[2].values
prog_df = pd.DataFrame({'Language': prog_names, 'Count': prog_count})
prog_df = prog_df.set_index('Language')
prog_df.sort_values(inplace=True, by='Count', ascending=False)
sns.barplot(prog_df.Count, prog_df.index)
plt.title('What programming languages do you use on a regular basis?')
plt.show() | code |
50227915/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
gender = data[data.columns[2]].value_counts()
sns.barplot(gender, gender.index)
man = gender['Man']
woman = gender['Woman']
diff_p = (man - woman) / woman * 100
print(f'Men are more than women in this field by {diff_p}%')
plt.show() | code |
50227915/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
gender = data[data.columns[2]].value_counts()
man = gender['Man']
woman = gender['Woman']
diff_p = (man - woman) / woman * 100
Male = data[data[questions[2]] == 'Man']
Female = data[data[questions[2]] == 'Woman']
fig, ax = plt.subplots()
m_age_groups = Male[Male.columns[1]].value_counts().sort_index()
sns.barplot(m_age_groups,m_age_groups.index,color="cyan")
f_age_groups = Female[Female.columns[1]].value_counts().sort_index()
sns.barplot(-1 * f_age_groups,f_age_groups.index,color="salmon")
ticks = ax.get_xticks()
plt.tight_layout()
ax.set_xticklabels([int(abs(tick)) for tick in ticks])
red_patch = mpatches.Patch(color='salmon', label='Female')
black_patch = mpatches.Patch(color='cyan', label='Male')
plt.legend(handles=[red_patch, black_patch])
plt.show()
plt.rcParams['figure.figsize'] = (10, 10)
country = data[data.columns[3]].value_counts()
perce = country['India'] / country.sum() * 100
ITEM = data[data.columns[4]].value_counts()
perc = (ITEM.iloc[0] + ITEM.iloc[0]) / ITEM.sum() * 100
print(f'Masters and Bachelor graduates constitute {perc}% of the total demographic.')
sns.barplot(ITEM, ITEM.index)
plt.show() | code |
50227915/cell_3 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
print(questions[:15]) | code |
50227915/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
gender = data[data.columns[2]].value_counts()
man = gender['Man']
woman = gender['Woman']
diff_p = (man - woman) / woman * 100
Male = data[data[questions[2]] == 'Man']
Female = data[data[questions[2]] == 'Woman']
fig, ax = plt.subplots()
m_age_groups = Male[Male.columns[1]].value_counts().sort_index()
sns.barplot(m_age_groups,m_age_groups.index,color="cyan")
f_age_groups = Female[Female.columns[1]].value_counts().sort_index()
sns.barplot(-1 * f_age_groups,f_age_groups.index,color="salmon")
ticks = ax.get_xticks()
plt.tight_layout()
ax.set_xticklabels([int(abs(tick)) for tick in ticks])
red_patch = mpatches.Patch(color='salmon', label='Female')
black_patch = mpatches.Patch(color='cyan', label='Male')
plt.legend(handles=[red_patch, black_patch])
plt.show()
plt.rcParams['figure.figsize'] = (10, 10)
country = data[data.columns[3]].value_counts()
perce = country['India'] / country.sum() * 100
print(f'The percentage of Indians in the DS Community {perce}')
sns.barplot(country, country.index)
plt.show() | code |
50227915/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
gender = data[data.columns[2]].value_counts()
man = gender['Man']
woman = gender['Woman']
diff_p = (man - woman) / woman * 100
Male = data[data[questions[2]] == 'Man']
Female = data[data[questions[2]] == 'Woman']
fig, ax = plt.subplots()
m_age_groups = Male[Male.columns[1]].value_counts().sort_index()
sns.barplot(m_age_groups, m_age_groups.index, color='cyan')
f_age_groups = Female[Female.columns[1]].value_counts().sort_index()
sns.barplot(-1 * f_age_groups, f_age_groups.index, color='salmon')
ticks = ax.get_xticks()
plt.tight_layout()
ax.set_xticklabels([int(abs(tick)) for tick in ticks])
red_patch = mpatches.Patch(color='salmon', label='Female')
black_patch = mpatches.Patch(color='cyan', label='Male')
plt.legend(handles=[red_patch, black_patch])
plt.show() | code |
50227915/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.patches as mpatches
sns.set_style(style='whitegrid')
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', low_memory=False)
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
questions = list(data.columns)
question_df = pd.DataFrame(data.columns, columns=['questions'])
age_groups = data[data.columns[1]].value_counts().sort_index()
sns.barplot(age_groups, age_groups.index)
mill = age_groups['22-24'] + age_groups['25-29']
mill_percentage = mill / age_groups.sum() * 100
print(f'Millennials in the DS community : {mill}')
print(f'% of Millennials in the DS community : {mill_percentage}')
plt.show() | code |
90155131/cell_13 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras import Model, Sequential
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
from tensorflow.keras import Model, Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow import GradientTape
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = (x_train / 255.0, x_test / 255.0)
data_augmentation = Sequential([RandomFlip('horizontal'), RandomTranslation(height_factor=(-0.1, 0.1), width_factor=(-0.1, 0.1), fill_mode='constant')])
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32).map(lambda x, y: (data_augmentation(x), y))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
def show_image(image):
plt.colorbar()
imagenum = np.random.randint(len(x_train))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
def resnet_layer(inputs, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(0.0001))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
doubled. Within each stage, the layers have the same number filters and the
same number of filters.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
if stack > 0 and res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
x = Activation('relu')(x)
num_filters *= 2
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
doubled. Within each stage, the layers have the same number filters and the
same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True)
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0:
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters_in, kernel_size=1, strides=strides, activation=activation, batch_normalization=batch_normalization, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False)
if res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters_out, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
num_filters_in = num_filters_out
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
def plot_metrics(metric_name, title, append='val_'):
plt.xticks(list(range(len(history.history[metric_name]))))
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
optimizer = SGD(learning_rate=0.01)
loss_object = SparseCategoricalCrossentropy(from_logits=False, reduction='sum')
accuracy_object = SparseCategoricalAccuracy()
model.compile(optimizer=optimizer, loss=loss_object, metrics=[accuracy_object])
with open('template1.txt', 'w') as f:
history = model.fit(train_ds, validation_data=test_ds, epochs=10)
print(history.history, file=f) | code |
90155131/cell_9 | [
"image_output_2.png",
"image_output_1.png"
] | from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
def resnet_layer(inputs, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(0.0001))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
doubled. Within each stage, the layers have the same number filters and the
same number of filters.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
if stack > 0 and res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
x = Activation('relu')(x)
num_filters *= 2
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
doubled. Within each stage, the layers have the same number filters and the
same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True)
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0:
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters_in, kernel_size=1, strides=strides, activation=activation, batch_normalization=batch_normalization, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False)
if res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters_out, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
num_filters_in = num_filters_out
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
plot_model(model) | code |
90155131/cell_18 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
def show_image(image):
plt.colorbar()
imagenum = np.random.randint(len(x_train))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def plot_metrics(metric_name, title, append='val_'):
plt.xticks(list(range(len(history.history[metric_name]))))
with plt.xkcd():
plot_metrics(metric_name='loss', title='Plot of Model Loss against number of Epochs', append='test_')
plot_metrics(metric_name='acc', title='Plot of Model Accuracy against number of Epochs', append='test_') | code |
90155131/cell_16 | [
"image_output_1.png"
] | from tensorflow import GradientTape
from tensorflow.keras import Model, Sequential
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
from tensorflow.keras import Model, Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow import GradientTape
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = (x_train / 255.0, x_test / 255.0)
data_augmentation = Sequential([RandomFlip('horizontal'), RandomTranslation(height_factor=(-0.1, 0.1), width_factor=(-0.1, 0.1), fill_mode='constant')])
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32).map(lambda x, y: (data_augmentation(x), y))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
def show_image(image):
plt.colorbar()
imagenum = np.random.randint(len(x_train))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
from tensorflow.keras.layers import Add, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, Input, Activation, Dense, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
def resnet_layer(inputs, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(0.0001))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
doubled. Within each stage, the layers have the same number filters and the
same number of filters.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
if stack > 0 and res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
x = Activation('relu')(x)
num_filters *= 2
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
doubled. Within each stage, the layers have the same number filters and the
same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True)
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0:
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0:
strides = 2
y = resnet_layer(inputs=x, num_filters=num_filters_in, kernel_size=1, strides=strides, activation=activation, batch_normalization=batch_normalization, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False)
y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False)
if res_block == 0:
x = resnet_layer(inputs=x, num_filters=num_filters_out, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
x = Add()([x, y])
num_filters_in = num_filters_out
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
return model
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
def plot_metrics(metric_name, title, append='val_'):
plt.xticks(list(range(len(history.history[metric_name]))))
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
optimizer = SGD(learning_rate=0.01)
loss_object = SparseCategoricalCrossentropy(from_logits=False, reduction='sum')
accuracy_object = SparseCategoricalAccuracy()
model.compile(optimizer=optimizer, loss=loss_object, metrics=[accuracy_object])
with open('template1.txt', 'w') as f:
history = model.fit(train_ds, validation_data=test_ds, epochs=10)
from tqdm import tqdm
class History:
def __init__(self, metrics):
self.history = {x: [] for x in metrics}
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
optimizer = SGD(learning_rate=0.01)
loss_object = SparseCategoricalCrossentropy(from_logits=False, reduction='sum')
accuracy_object = SparseCategoricalAccuracy()
metrics = ['acc', 'loss', 'test_acc', 'test_loss']
history = History(metrics)
for epoch in range(10):
losses = []
accuracy_object.reset_states()
pbar = tqdm(train_ds)
for train_images, train_labels in pbar:
with GradientTape() as tape:
predictions = model(train_images, training=True)
loss = loss_object(train_labels, predictions)
losses.append(loss.numpy())
accuracy_object.update_state(train_labels, predictions)
pbar.set_description(f'Epoch: {epoch + 1}, Train Loss: {np.mean(losses):.3f}, Train Acc: {accuracy_object.result().numpy():.3f}')
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
acc = accuracy_object.result().numpy()
loss = np.mean(losses)
pbar.close()
losses = []
accuracy_object.reset_states()
pbar = tqdm(test_ds)
for test_images, test_labels in pbar:
with GradientTape() as tape:
predictions = model(test_images, training=False)
loss = loss_object(test_labels, predictions)
losses.append(loss.numpy())
accuracy_object.update_state(test_labels, predictions)
pbar.set_description(f'Epoch: {epoch + 1}, Test Loss: {np.mean(losses):.3f}, Test Acc: {accuracy_object.result().numpy():.3f}')
test_acc = accuracy_object.result().numpy()
test_loss = np.mean(losses)
pbar.write(f'Epoch {epoch + 1}, Train Loss: {loss}, Train Acc: {acc}, Test Loss: {test_loss}, Test Acc: {test_acc}')
pbar.close()
with open('template2.txt', 'w') as f:
for metric in metrics:
history.history[metric].append(vars()[metric])
print(history.history, file=f) | code |
90155131/cell_3 | [
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_7.png",
"application_vnd.jupyter.stderr_output_11.png",
"text_plain_output_20.png",
"text_plain_output_4.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_18.png",
"application_vnd.jupyter.stderr_output_19.png",
"application_vnd.jupyter.stderr_output_13.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_16.png",
"application_vnd.jupyter.stderr_output_15.png",
"text_plain_output_8.png",
"application_vnd.jupyter.stderr_output_17.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"text_plain_output_12.png",
"application_vnd.jupyter.stderr_output_21.png"
] | from tensorflow.keras import Model, Sequential
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
import tensorflow as tf
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Add
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomZoom, RandomTranslation
from tensorflow.keras import Model, Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow import GradientTape
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = (x_train / 255.0, x_test / 255.0)
data_augmentation = Sequential([RandomFlip('horizontal'), RandomTranslation(height_factor=(-0.1, 0.1), width_factor=(-0.1, 0.1), fill_mode='constant')])
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32).map(lambda x, y: (data_augmentation(x), y))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32) | code |
90155131/cell_17 | [
"text_plain_output_4.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
def show_image(image):
plt.colorbar()
imagenum = np.random.randint(len(x_train))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def plot_metrics(metric_name, title, append='val_'):
plt.xticks(list(range(len(history.history[metric_name]))))
plot_metrics(metric_name='loss', title='Plot of Model Loss against number of Epochs', append='test_')
plot_metrics(metric_name='acc', title='Plot of Model Accuracy against number of Epochs', append='test_') | code |
90155131/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
def show_image(image):
plt.colorbar()
imagenum = np.random.randint(len(x_train))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def plot_metrics(metric_name, title, append='val_'):
plt.xticks(list(range(len(history.history[metric_name]))))
plot_metrics(metric_name='loss', title='Plot of Model Loss against number of Epochs', append='val_')
plot_metrics(metric_name='sparse_categorical_accuracy', title='Plot of Model Accuracy against number of Epochs', append='val_') | code |
90155131/cell_5 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
def show_image(image):
plt.figure()
plt.imshow(image)
plt.colorbar()
plt.grid(False)
plt.show()
imagenum = np.random.randint(len(x_train))
show_image(x_train[imagenum].reshape(32, 32, 3))
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print('Class:', classes[int(y_train[imagenum])]) | code |
90136679/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import spacy
f = open('/kaggle/input/harry-potter-sorcerers-stone/Harry-potter-sorcerers-stone.txt', 'r')
hp_book = ''
lines = []
for line in f:
stripped_line = line.rstrip() + ' '
hp_book += stripped_line
lines.append(line)
f.close()
nlp = spacy.load('en_core_web_lg')
doc = nlp(hp_book) | code |
128003024/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
print('age vs chol_serum')
sns.scatterplot(data=df, x='age', y='chol_serum', hue='bloodsugar_fast')
plt.show() | code |
128003024/cell_13 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
Q1 = np.quantile(df['bloodpress_med'], 0.25)
Q3 = np.quantile(df['bloodpress_med'], 0.75)
IQR = Q3 - Q1
min_IQR = Q1 - 1.5 * IQR
max_IQR = Q3 + 1.5 * IQR
low_out = []
high_out = []
for i in df['bloodpress_med']:
if i < min_IQR:
low_out.append(i)
if i > max_IQR:
high_out.append(i)
print('High outlier : ', high_out)
print('upper limit : ', min(high_out)) | code |
128003024/cell_25 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
print('bloodpress_med vs chol_serum')
sns.scatterplot(data=df, x='bloodpress_med', y='chol_serum', hue='bloodsugar_fast')
plt.show() | code |
128003024/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128003024/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
print('age vs bloodpess_med')
sns.scatterplot(data=df, x='age', y='bloodpress_med', hue='bloodsugar_fast')
plt.show() | code |
128003024/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True) | code |
128003024/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
df.head() | code |
128003024/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
corr_matrix = df.corr(numeric_only=True)
plt.figure(figsize=(20, 5))
sns.heatmap(corr_matrix, cmap='YlGnBu', annot=True)
plt.show() | code |
128003024/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df) | code |
128003024/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
df.info() | code |
128003024/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True) | code |
128003024/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df) | code |
128003024/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.hist(bins=15, figsize=(30, 15))
plt.show() | code |
128003024/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (8, 5)
plt.style.use('fivethirtyeight')
import seaborn as sns
import plotly.express as p
from plotly.offline import iplot
import os
import glob
from sklearn.cluster import KMeans | code |
128003024/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.describe(include='all') | code |
128003024/cell_24 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
print('age vs max_heartrate')
sns.scatterplot(data=df, x='age', y='max_heartrate', hue='thal')
plt.show() | code |
128003024/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
Q1 = np.quantile(df['bloodpress_med'], 0.25)
Q3 = np.quantile(df['bloodpress_med'], 0.75)
IQR = Q3 - Q1
min_IQR = Q1 - 1.5 * IQR
max_IQR = Q3 + 1.5 * IQR
low_out = []
high_out = []
for i in df['bloodpress_med']:
if i < min_IQR:
low_out.append(i)
if i > max_IQR:
high_out.append(i)
print(np.unique(df[['sex', 'chest_pain_type', 'bloodsugar_fast', 'rest_ecg', 'exc_angina', 'slope', 'major_vessels', 'thal']].values)) | code |
128003024/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.cov(numeric_only=True)
df.corr(numeric_only=True)
print('age vs oldpeak')
sns.scatterplot(data=df, x='age', y='oldpeak', hue='exc_angina')
plt.show() | code |
128003024/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/hearts/heart.csv')
np.shape(df)
df.rename(columns={'cp': 'chest_pain_type', 'trestbps': 'bloodpress_med', 'chol': 'chol_serum', 'fbs': 'bloodsugar_fast', 'restecg': 'rest_ecg', 'thalach': 'max_heartrate', 'exang': 'exc_angina', 'ca': 'major_vessels'}, inplace=True)
df = df.drop_duplicates()
np.shape(df)
df.boxplot(figsize=(15, 5), rot=45, fontsize=15, grid=True) | code |
90108118/cell_13 | [
"text_plain_output_1.png"
] | from absl import flags
from gezi import tqdm
import gezi
import glob
import os
from IPython.display import display
import tensorflow as tf
import torch
from absl import flags
FLAGS = flags.FLAGS
from transformers import AutoTokenizer
from datasets import Dataset
from src import config
from src.util import *
from src.get_preds import *
from src.eval import *
import melt as mt
import numpy as np
import glob
import gc
from numba import cuda
from gezi import tqdm
import gezi
import husky
import lele
gezi.init_flags()
model_root = '../input'
model_dirs = [x for x in glob.glob(f'{model_root}/feedback-model*') if os.path.isdir(x)]
model_dirs = [f'../input/feedback-model{i}' for i in range(len(model_dirs))]
model_dir = model_dirs[0]
tf_models = []
first_models = []
ic(first_models)
model_dirs = gezi.unique_list([*tf_models, *first_models, *model_dirs])
m = model_dirs
used_model_indexes = list(range(len(model_dirs)))
used_model_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17]
used_model_indexes = [23]
model_dirs = [m[i] for i in used_model_indexes]
used_tf_models = [x for x in model_dirs if x in tf_models]
num_tf_models = len(used_tf_models)
mns = []
for i, model_dir in tqdm(enumerate(model_dirs), total=len(model_dirs)):
gezi.restore_configs(model_dir)
mns.append(os.path.basename(FLAGS.model_dir))
assert 'online' in FLAGS.model_dir
mns
MIN_WEIGHT = 1
weights_dict = {}
weights_dict = {'bart.start.run2': 9, 'roberta.start.nwemb-0': 9, 'deberta.start': 8, 'deberta-xlarge.start': 9, 'deberta-xlarge.end': 9, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1536': 7, 'deberta-v3.start.len1024.rnn_bi': 8, 'deberta-v3.end.len1024.seq_encoder-0': 10, 'deberta-v3.mid.len1024': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 7, 'deberta-v3.start.nwemb-0.mark_end-0': 10, 'deberta-v3.se': 10, 'deberta-v3.se2': 10, 'longformer.start.len1536': 6, 'longformer.start.len1600': 6, 'funnel.start.len1536.bs-8': 6, 'deberta-v3.start.len1024.stride-512': 4, 'electra.start.nwemb-0.run2': 7}
weights_dict0 = {'bart.start.run2': 6, 'deberta-v3.end.len1024.seq_encoder-0': 6, 'deberta-v3.mid.len1024': 4, 'deberta-v3.se': 6, 'deberta-v3.se2': 1, 'deberta-v3.start.len1024.rnn_bi': 5, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1536': 4, 'deberta-v3.start.nwemb-0.mark_end-0': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 9, 'deberta-xlarge.end': 0, 'deberta-xlarge.start': 6, 'deberta.start': 6, 'longformer.start.len1536': 9, 'roberta.start.nwemb-0': 6}
weights_dict1 = {'bart.start.run2': 7, 'deberta-v3.end.len1024.seq_encoder-0': 10, 'deberta-v3.mid.len1024': 6, 'deberta-v3.se': 2, 'deberta-v3.se2': 7, 'deberta-v3.start.len1024.rnn_bi': 8, 'deberta-v3.start.len1024.stride-256': 10, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 7, 'deberta-v3.start.len1536': 8, 'deberta-v3.start.nwemb-0.mark_end-0': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 7, 'deberta-xlarge.end': 7, 'deberta-xlarge.start': 10, 'deberta.start': 6, 'longformer.start.len1536': 8, 'roberta.start.nwemb-0': 5}
weights_dict2 = {'bart.start.run2': 6, 'deberta-v3.end.len1024.seq_encoder-0': 7, 'deberta-v3.mid.len1024': 5, 'deberta-v3.se': 9, 'deberta-v3.se2': 5, 'deberta-v3.start.len1024.rnn_bi': 9, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1536': 6, 'deberta-v3.start.nwemb-0.mark_end-0': 0, 'deberta-v3.start.stride-256.seq_encoder-0': 5, 'deberta-xlarge.end': 8, 'deberta-xlarge.start': 4, 'deberta.start': 8, 'longformer.start.len1536': 9, 'roberta.start.nwemb-0': 6}
weights_dicts = [weights_dict0, weights_dict1, weights_dict2]
ic(gezi.sort_byval(weights_dict))
len(weights_dict) | code |
90108118/cell_9 | [
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_6.png",
"text_html_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import gezi
import glob
import os
gezi.init_flags()
model_root = '../input'
model_dirs = [x for x in glob.glob(f'{model_root}/feedback-model*') if os.path.isdir(x)]
model_dirs = [f'../input/feedback-model{i}' for i in range(len(model_dirs))]
model_dir = model_dirs[0]
tf_models = []
first_models = []
ic(first_models)
model_dirs = gezi.unique_list([*tf_models, *first_models, *model_dirs])
m = model_dirs
used_model_indexes = list(range(len(model_dirs)))
used_model_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17]
used_model_indexes = [23]
model_dirs = [m[i] for i in used_model_indexes]
used_tf_models = [x for x in model_dirs if x in tf_models]
num_tf_models = len(used_tf_models) | code |
90108118/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install -q icecream --no-index --find-links=file:///kaggle/input/icecream/ | code |
90108118/cell_1 | [
"text_plain_output_1.png"
] | import sys
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import traceback
!ln -s ../input/feedback ./src
if os.path.exists('/kaggle'):
sys.path.append('/kaggle/input/pikachu/utils')
sys.path.append('/kaggle/input/pikachu/third')
sys.path.append('.')
!ls ../input | code |
90108118/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | num_test_ids = 1000
folds = pd.read_csv('../input/feedback/folds.csv')
test_ids = folds[folds.kfold == 0].id.values
test_ids.sort()
test_ids = test_ids[:num_test_ids]
len(test_ids) | code |
90108118/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | ic(P) | code |
90108118/cell_3 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install -q pymp-pypi --no-index --find-links=file:///kaggle/input/pymp-pypi/pymp-pypi-0.4.5/dist | code |
90108118/cell_14 | [
"text_plain_output_1.png"
] | from absl import flags
from gezi import tqdm
import gezi
import glob
import os
from IPython.display import display
import tensorflow as tf
import torch
from absl import flags
FLAGS = flags.FLAGS
from transformers import AutoTokenizer
from datasets import Dataset
from src import config
from src.util import *
from src.get_preds import *
from src.eval import *
import melt as mt
import numpy as np
import glob
import gc
from numba import cuda
from gezi import tqdm
import gezi
import husky
import lele
gezi.init_flags()
model_root = '../input'
model_dirs = [x for x in glob.glob(f'{model_root}/feedback-model*') if os.path.isdir(x)]
model_dirs = [f'../input/feedback-model{i}' for i in range(len(model_dirs))]
model_dir = model_dirs[0]
tf_models = []
first_models = []
ic(first_models)
model_dirs = gezi.unique_list([*tf_models, *first_models, *model_dirs])
m = model_dirs
used_model_indexes = list(range(len(model_dirs)))
used_model_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17]
used_model_indexes = [23]
model_dirs = [m[i] for i in used_model_indexes]
used_tf_models = [x for x in model_dirs if x in tf_models]
num_tf_models = len(used_tf_models)
mns = []
for i, model_dir in tqdm(enumerate(model_dirs), total=len(model_dirs)):
gezi.restore_configs(model_dir)
mns.append(os.path.basename(FLAGS.model_dir))
assert 'online' in FLAGS.model_dir
mns
MIN_WEIGHT = 1
weights_dict = {}
weights_dict = {'bart.start.run2': 9, 'roberta.start.nwemb-0': 9, 'deberta.start': 8, 'deberta-xlarge.start': 9, 'deberta-xlarge.end': 9, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1536': 7, 'deberta-v3.start.len1024.rnn_bi': 8, 'deberta-v3.end.len1024.seq_encoder-0': 10, 'deberta-v3.mid.len1024': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 7, 'deberta-v3.start.nwemb-0.mark_end-0': 10, 'deberta-v3.se': 10, 'deberta-v3.se2': 10, 'longformer.start.len1536': 6, 'longformer.start.len1600': 6, 'funnel.start.len1536.bs-8': 6, 'deberta-v3.start.len1024.stride-512': 4, 'electra.start.nwemb-0.run2': 7}
weights_dict0 = {'bart.start.run2': 6, 'deberta-v3.end.len1024.seq_encoder-0': 6, 'deberta-v3.mid.len1024': 4, 'deberta-v3.se': 6, 'deberta-v3.se2': 1, 'deberta-v3.start.len1024.rnn_bi': 5, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1536': 4, 'deberta-v3.start.nwemb-0.mark_end-0': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 9, 'deberta-xlarge.end': 0, 'deberta-xlarge.start': 6, 'deberta.start': 6, 'longformer.start.len1536': 9, 'roberta.start.nwemb-0': 6}
weights_dict1 = {'bart.start.run2': 7, 'deberta-v3.end.len1024.seq_encoder-0': 10, 'deberta-v3.mid.len1024': 6, 'deberta-v3.se': 2, 'deberta-v3.se2': 7, 'deberta-v3.start.len1024.rnn_bi': 8, 'deberta-v3.start.len1024.stride-256': 10, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 7, 'deberta-v3.start.len1536': 8, 'deberta-v3.start.nwemb-0.mark_end-0': 8, 'deberta-v3.start.stride-256.seq_encoder-0': 7, 'deberta-xlarge.end': 7, 'deberta-xlarge.start': 10, 'deberta.start': 6, 'longformer.start.len1536': 8, 'roberta.start.nwemb-0': 5}
weights_dict2 = {'bart.start.run2': 6, 'deberta-v3.end.len1024.seq_encoder-0': 7, 'deberta-v3.mid.len1024': 5, 'deberta-v3.se': 9, 'deberta-v3.se2': 5, 'deberta-v3.start.len1024.rnn_bi': 9, 'deberta-v3.start.len1024.stride-256': 6, 'deberta-v3.start.len1024.stride-256.seq_encoder-0': 10, 'deberta-v3.start.len1536': 6, 'deberta-v3.start.nwemb-0.mark_end-0': 0, 'deberta-v3.start.stride-256.seq_encoder-0': 5, 'deberta-xlarge.end': 8, 'deberta-xlarge.start': 4, 'deberta.start': 8, 'longformer.start.len1536': 9, 'roberta.start.nwemb-0': 6}
weights_dicts = [weights_dict0, weights_dict1, weights_dict2]
ic(gezi.sort_byval(weights_dict))
len(weights_dict)
def get_weight(x, idx=0):
weight = 1
if x in weights_dict:
return weights_dicts[idx][x]
return max(weight, 1)
weights = [get_weight(x) for x in mns]
weights2 = [get_weight(x, 1) for x in mns]
weights3 = [get_weight(x, 2) for x in mns]
ic(list(zip(range(len(model_dirs)), model_dirs, mns, weights)), len(model_dirs)) | code |
90108118/cell_12 | [
"text_plain_output_1.png"
] | from absl import flags
from gezi import tqdm
import gezi
import glob
import os
from IPython.display import display
import tensorflow as tf
import torch
from absl import flags
FLAGS = flags.FLAGS
from transformers import AutoTokenizer
from datasets import Dataset
from src import config
from src.util import *
from src.get_preds import *
from src.eval import *
import melt as mt
import numpy as np
import glob
import gc
from numba import cuda
from gezi import tqdm
import gezi
import husky
import lele
gezi.init_flags()
model_root = '../input'
model_dirs = [x for x in glob.glob(f'{model_root}/feedback-model*') if os.path.isdir(x)]
model_dirs = [f'../input/feedback-model{i}' for i in range(len(model_dirs))]
model_dir = model_dirs[0]
tf_models = []
first_models = []
ic(first_models)
model_dirs = gezi.unique_list([*tf_models, *first_models, *model_dirs])
m = model_dirs
used_model_indexes = list(range(len(model_dirs)))
used_model_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17]
used_model_indexes = [23]
model_dirs = [m[i] for i in used_model_indexes]
used_tf_models = [x for x in model_dirs if x in tf_models]
num_tf_models = len(used_tf_models)
mns = []
for i, model_dir in tqdm(enumerate(model_dirs), total=len(model_dirs)):
gezi.restore_configs(model_dir)
mns.append(os.path.basename(FLAGS.model_dir))
assert 'online' in FLAGS.model_dir
mns | code |
32062709/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.head() | code |
32062709/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.head() | code |
32062709/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5] | code |
32062709/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5]
economics.groupby('Region')[['2019 Score', 'GDP Growth Rate (%)']].mean()
economics.groupby('Region')['2019 Score'].idxmin()
economics.groupby('Region')['2019 Score'].std()
economics.groupby('Region').size() | code |
32062709/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32062709/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics[economics['GDP Growth Rate (%)'] >= 8.0].index.to_list() | code |
32062709/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5]
economics.groupby('Region')[['2019 Score', 'GDP Growth Rate (%)']].mean()
economics.groupby('Region')['2019 Score'].idxmin()
economics.groupby('Region')['2019 Score'].std()
economics.groupby('Region').size()
economics.groupby('Region')['2019 Score'].mean().sort_values().plot.barh() | code |
32062709/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list() | code |
32062709/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)'] | code |
32062709/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5]
economics.groupby('Region')[['2019 Score', 'GDP Growth Rate (%)']].mean()
economics.groupby('Region')['2019 Score'].idxmin() | code |
32062709/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5]
economics.groupby('Region')[['2019 Score', 'GDP Growth Rate (%)']].mean() | code |
32062709/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape | code |
32062709/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
economics = pd.read_csv('/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv', index_col='Country', encoding='ISO-8859-1')
economics.columns.to_list()
economics.shape
economics = economics[['Region', 'World Rank', 'Region Rank', '2019 Score', 'Population (Millions)', 'GDP Growth Rate (%)', 'Unemployment (%)', 'Inflation (%)']]
economics.loc['Brazil', 'Unemployment (%)']
economics.sort_values('2019 Score', ascending=False).iloc[:5]
economics.groupby('Region')[['2019 Score', 'GDP Growth Rate (%)']].mean()
economics.groupby('Region')['2019 Score'].idxmin()
economics.groupby('Region')['2019 Score'].std() | code |
106208845/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=goalkeeper_scores[:10], x='Team', y='Gk score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 GOALKEEPER SCORE');
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=defence_scores[:10], x='Team', y='Df score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 STRONGEST DEFENCE');
offence_score = []
for x in fifa2022_teams:
of_score = np.round((df[df['home_team'] == x]['home_team_mean_offense_score'].mean() + df[df['away_team'] == x]['away_team_mean_offense_score'].mean()) / 2, 2)
offence_score.append(of_score)
offence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Of score': offence_score}).sort_values('Of score', ascending=False).reset_index(drop=True)
offence_scores.index += 1
offence_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=offence_scores[:10], x='Team', y='Of score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 OFFENCE SCORE');
midfield_score = []
for x in fifa2022_teams:
md_score = np.round((df[df['home_team'] == x]['home_team_mean_midfield_score'].mean() + df[df['away_team'] == x]['away_team_mean_midfield_score'].mean()) / 2, 2)
midfield_score.append(md_score)
midfield_scores = pd.DataFrame({'Team': fifa2022_teams, 'Md score': midfield_score}).sort_values('Md score', ascending=False).reset_index(drop=True)
midfield_scores.index += 1
midfield_scores
plt.figure(figsize=(11, 7), dpi=90)
ax = sns.barplot(data=midfield_scores[:10], x='Team', y='Md score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 MIDFIELD SCORE') | code |
106208845/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
print('\nTop 10 team ranking:\n')
team_ranks[:10] | code |
106208845/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
df | code |
106208845/cell_20 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores
offence_score = []
for x in fifa2022_teams:
of_score = np.round((df[df['home_team'] == x]['home_team_mean_offense_score'].mean() + df[df['away_team'] == x]['away_team_mean_offense_score'].mean()) / 2, 2)
offence_score.append(of_score)
offence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Of score': offence_score}).sort_values('Of score', ascending=False).reset_index(drop=True)
offence_scores.index += 1
offence_scores
midfield_score = []
for x in fifa2022_teams:
md_score = np.round((df[df['home_team'] == x]['home_team_mean_midfield_score'].mean() + df[df['away_team'] == x]['away_team_mean_midfield_score'].mean()) / 2, 2)
midfield_score.append(md_score)
midfield_scores = pd.DataFrame({'Team': fifa2022_teams, 'Md score': midfield_score}).sort_values('Md score', ascending=False).reset_index(drop=True)
midfield_scores.index += 1
midfield_scores | code |
106208845/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
df.describe().T | code |
106208845/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=goalkeeper_scores[:10], x='Team', y='Gk score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 GOALKEEPER SCORE');
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=defence_scores[:10], x='Team', y='Df score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 STRONGEST DEFENCE');
offence_score = []
for x in fifa2022_teams:
of_score = np.round((df[df['home_team'] == x]['home_team_mean_offense_score'].mean() + df[df['away_team'] == x]['away_team_mean_offense_score'].mean()) / 2, 2)
offence_score.append(of_score)
offence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Of score': offence_score}).sort_values('Of score', ascending=False).reset_index(drop=True)
offence_scores.index += 1
offence_scores
plt.figure(figsize=(11, 7), dpi=90)
ax = sns.barplot(data=offence_scores[:10], x='Team', y='Of score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 OFFENCE SCORE') | code |
106208845/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
print(f'Numerical columns: \n\n{num_cols}\n\nCategorical columns: \n\n{cat_cols}') | code |
106208845/cell_18 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores
offence_score = []
for x in fifa2022_teams:
of_score = np.round((df[df['home_team'] == x]['home_team_mean_offense_score'].mean() + df[df['away_team'] == x]['away_team_mean_offense_score'].mean()) / 2, 2)
offence_score.append(of_score)
offence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Of score': offence_score}).sort_values('Of score', ascending=False).reset_index(drop=True)
offence_scores.index += 1
offence_scores | code |
106208845/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
print(f"Columns contain 'null' values: \n\n{columns_contains_null}") | code |
106208845/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
plt.figure(figsize=(11, 7), dpi=90)
ax = sns.barplot(data=goalkeeper_scores[:10], x='Team', y='Gk score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 GOALKEEPER SCORE') | code |
106208845/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores | code |
106208845/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']
ranks = []
for x in fifa2022_teams:
rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
if rank_df['home_team'] == x:
rank = rank_df['home_team_fifa_rank']
else:
rank = rank_df['away_team_fifa_rank']
ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1
team_ranks
goalkeeper_score = []
for x in fifa2022_teams:
gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores
plt.figure(figsize=(11,7), dpi=90)
ax = sns.barplot(data=goalkeeper_scores[:10], x='Team', y='Gk score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 GOALKEEPER SCORE');
defence_score = []
for x in fifa2022_teams:
df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores
plt.figure(figsize=(11, 7), dpi=90)
ax = sns.barplot(data=defence_scores[:10], x='Team', y='Df score')
ax.bar_label(ax.containers[0])
plt.xlabel('TEAM')
plt.ylabel('SCORE')
plt.title('TOP 10 STRONGEST DEFENCE') | code |
106208845/cell_14 | [
"text_html_output_1.png"
] | import numpy as np
# FIFA World Cup 2022: load the match history and build two per-team tables —
# the latest FIFA rank and the average goalkeeper score.
import pandas as pd

df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T

# Column bookkeeping: numeric vs. categorical labels, plus columns with NaNs.
num_cols = df.select_dtypes(include=['int64', 'float64']).columns.tolist()
cat_cols = df.select_dtypes(include=['object']).columns.tolist()
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]

# The 32 teams qualified for the 2022 World Cup.
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']

def _latest_fifa_rank(team):
    """Rank recorded in the most recent match *team* took part in."""
    played = df[(df['home_team'] == team) | (df['away_team'] == team)]
    latest = played.sort_values(
        ['date', 'home_team_fifa_rank', 'away_team_fifa_rank'],
        ascending=[False, True, True],
    ).iloc[0]
    side = 'home' if latest['home_team'] == team else 'away'
    return latest[side + '_team_fifa_rank']

ranks = [_latest_fifa_rank(team) for team in fifa2022_teams]
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks})
team_ranks = team_ranks.sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1  # 1-based index reads better for a ranking table
team_ranks

def _mean_goalkeeper_score(team):
    """Average of the team's home-side and away-side goalkeeper score means."""
    home = df.loc[df['home_team'] == team, 'home_team_goalkeeper_score'].mean()
    away = df.loc[df['away_team'] == team, 'away_team_goalkeeper_score'].mean()
    return np.round((home + away) / 2, 2)

goalkeeper_score = [_mean_goalkeeper_score(team) for team in fifa2022_teams]
goalkeeper_scores = (
    pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score})
    .sort_values('Gk score', ascending=False)
    .reset_index(drop=True)
)
goalkeeper_scores.index += 1
goalkeeper_scores | code |
106208845/cell_22 | [
"image_output_1.png"
] | import numpy as np
# FIFA World Cup 2022 exploratory analysis: latest ranks, positional strength
# scores, and win/draw/lose records for every qualified team.
import pandas as pd

df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T

# Column bookkeeping: numeric vs. categorical, plus columns containing NaNs.
num_cols = list(df.select_dtypes(include=['int64', 'float64']).columns)
cat_cols = list(df.select_dtypes(include=['object']).columns)
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]

# The 32 teams qualified for the 2022 World Cup.
fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']

# Latest FIFA rank per team: take the most recent match the team appears in
# and read the rank of whichever side (home or away) the team played on.
ranks = []
for x in fifa2022_teams:
    rank_df = df[(df['home_team'] == x) | (df['away_team'] == x)].sort_values(['date', 'home_team_fifa_rank', 'away_team_fifa_rank'], ascending=[False, True, True]).iloc[0]
    if rank_df['home_team'] == x:
        rank = rank_df['home_team_fifa_rank']
    else:
        rank = rank_df['away_team_fifa_rank']
    ranks.append(rank)
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks}).sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1  # 1-based index for a ranking table
team_ranks

# Mean goalkeeper score per team (average of home-side and away-side means).
goalkeeper_score = []
for x in fifa2022_teams:
    gk_score = np.round((df[df['home_team'] == x]['home_team_goalkeeper_score'].mean() + df[df['away_team'] == x]['away_team_goalkeeper_score'].mean()) / 2, 2)
    goalkeeper_score.append(gk_score)
goalkeeper_scores = pd.DataFrame({'Team': fifa2022_teams, 'Gk score': goalkeeper_score}).sort_values('Gk score', ascending=False).reset_index(drop=True)
goalkeeper_scores.index += 1
goalkeeper_scores

# Mean defence score per team.
defence_score = []
for x in fifa2022_teams:
    df_score = np.round((df[df['home_team'] == x]['home_team_mean_defense_score'].mean() + df[df['away_team'] == x]['away_team_mean_defense_score'].mean()) / 2, 2)
    defence_score.append(df_score)
defence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Df score': defence_score}).sort_values('Df score', ascending=False).reset_index(drop=True)
defence_scores.index += 1
defence_scores

# Mean offence score per team.
offence_score = []
for x in fifa2022_teams:
    of_score = np.round((df[df['home_team'] == x]['home_team_mean_offense_score'].mean() + df[df['away_team'] == x]['away_team_mean_offense_score'].mean()) / 2, 2)
    offence_score.append(of_score)
offence_scores = pd.DataFrame({'Team': fifa2022_teams, 'Of score': offence_score}).sort_values('Of score', ascending=False).reset_index(drop=True)
offence_scores.index += 1
offence_scores

# Mean midfield score per team.
midfield_score = []
for x in fifa2022_teams:
    md_score = np.round((df[df['home_team'] == x]['home_team_mean_midfield_score'].mean() + df[df['away_team'] == x]['away_team_mean_midfield_score'].mean()) / 2, 2)
    midfield_score.append(md_score)
midfield_scores = pd.DataFrame({'Team': fifa2022_teams, 'Md score': midfield_score}).sort_values('Md score', ascending=False).reset_index(drop=True)
midfield_scores.index += 1
midfield_scores

# Win/draw/lose tallies. Results are recorded from the home team's point of
# view, so for the away side a home 'Lose' counts as an away win and a home
# 'Win' counts as an away loss.
hwins, hdraws, hloses = ([], [], [])
awins, adraws, aloses = ([], [], [])
for team in fifa2022_teams:
    # Combine both conditions in one boolean mask. The previous chained form
    # df[cond1][cond2] applied a full-length mask to an already-filtered
    # frame, which relies on index re-alignment and raises a pandas
    # UserWarning ("Boolean Series key will be reindexed...").
    home_win = df[(df['home_team'] == team) & (df['home_team_result'] == 'Win')].shape[0]
    home_draw = df[(df['home_team'] == team) & (df['home_team_result'] == 'Draw')].shape[0]
    home_lose = df[(df['home_team'] == team) & (df['home_team_result'] == 'Lose')].shape[0]
    away_win = df[(df['away_team'] == team) & (df['home_team_result'] == 'Lose')].shape[0]
    away_draw = df[(df['away_team'] == team) & (df['home_team_result'] == 'Draw')].shape[0]
    away_lose = df[(df['away_team'] == team) & (df['home_team_result'] == 'Win')].shape[0]
    hwins.append(home_win)
    hdraws.append(home_draw)
    hloses.append(home_lose)
    awins.append(away_win)
    adraws.append(away_draw)
    aloses.append(away_lose)
wins = np.add(hwins, awins)
draws = np.add(hdraws, adraws)
loses = np.add(hloses, aloses)
win_draw_lose = pd.DataFrame({'Team': fifa2022_teams, 'Win': wins, 'Draw': draws, 'Lose': loses, 'Home win': hwins, 'Home draw': hdraws, 'Home lose': hloses, 'Away win': awins, 'Away draw': adraws, 'Away lose': aloses})
# Insert the totals next to the counts they summarise.
win_draw_lose.insert(1, 'Total', win_draw_lose['Win'] + win_draw_lose['Draw'] + win_draw_lose['Lose'])
win_draw_lose.insert(8, 'Total Home', win_draw_lose['Home win'] + win_draw_lose['Home draw'] + win_draw_lose['Home lose'])
win_draw_lose.insert(12, 'Total Away', win_draw_lose['Away win'] + win_draw_lose['Away draw'] + win_draw_lose['Away lose'])
# Percentage columns. np.round is used throughout for consistency (the first
# line previously used the np.around alias; they are the same function).
win_draw_lose['Win %'] = np.round(100 * win_draw_lose['Win'] / win_draw_lose['Total'], 2)
win_draw_lose['Draw %'] = np.round(100 * win_draw_lose['Draw'] / win_draw_lose['Total'], 2)
win_draw_lose['Lose %'] = np.round(100 * win_draw_lose['Lose'] / win_draw_lose['Total'], 2)
win_draw_lose['Home Win %'] = np.round(100 * win_draw_lose['Home win'] / win_draw_lose['Total Home'], 2)
win_draw_lose['Home Draw %'] = np.round(100 * win_draw_lose['Home draw'] / win_draw_lose['Total Home'], 2)
win_draw_lose['Home Lose %'] = np.round(100 * win_draw_lose['Home lose'] / win_draw_lose['Total Home'], 2)
win_draw_lose['Away Win %'] = np.round(100 * win_draw_lose['Away win'] / win_draw_lose['Total Away'], 2)
win_draw_lose['Away Draw %'] = np.round(100 * win_draw_lose['Away draw'] / win_draw_lose['Total Away'], 2)
win_draw_lose['Away Lose %'] = np.round(100 * win_draw_lose['Away lose'] / win_draw_lose['Total Away'], 2)
win_draw_lose = win_draw_lose.sort_values('Win %', ascending=False).reset_index(drop=True)
win_draw_lose.index += 1
win_draw_lose.style.set_properties(**{'background-color': 'gray', 'color': 'yellow'}, subset=['Home win', 'Home Win %', 'Win %']) | code |
106208845/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
# Load the FIFA match history and derive the latest FIFA rank for each of the
# 32 teams qualified for the 2022 World Cup.
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
pd.set_option('display.max_columns', None)
df.describe().T

# Column bookkeeping: numeric vs. categorical labels, plus columns with NaNs.
num_cols = df.select_dtypes(include=['int64', 'float64']).columns.tolist()
cat_cols = df.select_dtypes(include=['object']).columns.tolist()
columns_contains_null = [col for col in df.columns if df[col].isnull().any()]

fifa2022_teams = ['Qatar', 'Ecuador', 'Senegal', 'Netherlands', 'England', 'IR Iran', 'USA', 'Wales', 'Argentina', 'Saudi Arabia', 'Mexico', 'Poland', 'France', 'Australia', 'Denmark', 'Tunisia', 'Spain', 'Costa Rica', 'Germany', 'Japan', 'Belgium', 'Canada', 'Morocco', 'Croatia', 'Brazil', 'Serbia', 'Switzerland', 'Cameroon', 'Portugal', 'Ghana', 'Uruguay', 'Korea Republic']

def _latest_rank(team):
    """FIFA rank taken from the most recent match *team* appears in."""
    matches = df[(df['home_team'] == team) | (df['away_team'] == team)]
    newest = matches.sort_values(
        ['date', 'home_team_fifa_rank', 'away_team_fifa_rank'],
        ascending=[False, True, True],
    ).iloc[0]
    if newest['home_team'] == team:
        return newest['home_team_fifa_rank']
    return newest['away_team_fifa_rank']

ranks = [_latest_rank(team) for team in fifa2022_teams]
team_ranks = pd.DataFrame({'Team': fifa2022_teams, 'Rank': ranks})
team_ranks = team_ranks.sort_values('Rank').reset_index(drop=True)
team_ranks.index += 1  # 1-based index for a ranking table
team_ranks | code |
106208845/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
# Load the FIFA international match history (one row per recorded match).
df = pd.read_csv('../input/fifa-world-cup-2022/international_matches.csv')
df.info() | code |
18159050/cell_9 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Grayscale 40x96 image pipeline: rescale pixels to [0, 1] and augment with
# random horizontal/vertical flips.
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True)

def get_generator(path):
    """Stream categorical batches of 32 grayscale 40x96 images from *path*."""
    return train_datagen.flow_from_directory(path, target_size=(40, 96), batch_size=32, class_mode='categorical', color_mode='grayscale')

train_generator = get_generator('../input/transmittancy/train/')
test_generator = get_generator('../input/transmittancy/test/')

adagrad = Adagrad(decay=0.001, lr=0.005)
# mode='max': val_acc improves upwards. The original mode='min' was inverted
# and would have treated rising accuracy as "no improvement".
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='max')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')

# Small convolutional classifier over 3 classes.
model = Sequential()
model.add(Conv2D(128, kernel_size=2, activation='relu', strides=(2, 2), input_shape=(40, 96, 1)))
model.add(Conv2D(64, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Conv2D(32, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()

# Pass the callbacks that were previously created but never used, so early
# stopping and best-model checkpointing actually take effect during training.
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=test_generator, validation_steps=30, callbacks=[earlyStopping, mcp_save])

def plot_batch(batch):
    """Show one generator batch (32 images) as a 4x8 grid of grayscale tiles."""
    fig, axes = plt.subplots(4, 8, sharex=True, sharey=True, figsize=(16, 4))
    for ind, ax in enumerate(axes.flatten()):
        ax.imshow(batch[ind].reshape(40, 96), vmin=0, vmax=1, interpolation=None, cmap='gray')
    fig.tight_layout()
    plt.show()

# builtin next() instead of the deprecated generator .next() method
batch, _ = next(train_generator)
plot_batch(batch) | code |
18159050/cell_6 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Grayscale 40x96 image pipeline: rescale pixels to [0, 1] and augment with
# random horizontal/vertical flips.
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True)

def get_generator(path):
    """Stream categorical batches of 32 grayscale 40x96 images from *path*."""
    return train_datagen.flow_from_directory(path, target_size=(40, 96), batch_size=32, class_mode='categorical', color_mode='grayscale')

train_generator = get_generator('../input/transmittancy/train/')
test_generator = get_generator('../input/transmittancy/test/')

adagrad = Adagrad(decay=0.001, lr=0.005)
# mode='max': val_acc improves upwards. The original mode='min' was inverted
# and would have treated rising accuracy as "no improvement".
# NOTE(review): these callbacks are not passed to the fit call that follows
# this cell — consider callbacks=[earlyStopping, mcp_save] there.
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='max')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')

# Small convolutional classifier over 3 classes.
model = Sequential()
model.add(Conv2D(128, kernel_size=2, activation='relu', strides=(2, 2), input_shape=(40, 96, 1)))
model.add(Conv2D(64, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Conv2D(32, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=test_generator, validation_steps=30) | code |
18159050/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator | code |
18159050/cell_7 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Grayscale 40x96 image pipeline: rescale pixels to [0, 1] and augment with
# random horizontal/vertical flips.
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True)

def get_generator(path):
    """Stream categorical batches of 32 grayscale 40x96 images from *path*."""
    return train_datagen.flow_from_directory(path, target_size=(40, 96), batch_size=32, class_mode='categorical', color_mode='grayscale')

train_generator = get_generator('../input/transmittancy/train/')
test_generator = get_generator('../input/transmittancy/test/')

adagrad = Adagrad(decay=0.001, lr=0.005)
# mode='max': val_acc improves upwards. The original mode='min' was inverted
# and would have treated rising accuracy as "no improvement".
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='max')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')

# Small convolutional classifier over 3 classes.
model = Sequential()
model.add(Conv2D(128, kernel_size=2, activation='relu', strides=(2, 2), input_shape=(40, 96, 1)))
model.add(Conv2D(64, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Conv2D(32, kernel_size=2, activation='relu', strides=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()

# Pass the callbacks that were previously created but never used, so early
# stopping and best-model checkpointing actually take effect during training.
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=test_generator, validation_steps=30, callbacks=[earlyStopping, mcp_save])

# Learning curves: training vs. validation accuracy per epoch.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show() | code |
18159050/cell_3 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
# Grayscale image pipeline: scale pixel values into [0, 1] and augment the
# training data with random horizontal and vertical flips.
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    horizontal_flip=True,
    vertical_flip=True,
)

def get_generator(path):
    """Stream 40x96 grayscale images from *path* in categorical batches of 32."""
    return train_datagen.flow_from_directory(
        path,
        target_size=(40, 96),
        batch_size=32,
        class_mode='categorical',
        color_mode='grayscale',
    )

train_generator = get_generator('../input/transmittancy/train/')
test_generator = get_generator('../input/transmittancy/test/') | code |
72062410/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the ramen ratings and keep two working copies: one that drops the
# 'Unrated' rows, and one left untouched for a later conversion approach.
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen_drop_unrated = ramen.copy()
ramen_convert_unrated = ramen.copy()
rated_only = ramen_drop_unrated['Stars'] != 'Unrated'
ramen_drop_unrated = ramen_drop_unrated[rated_only]
# Mean rating per packaging style (notebook display expression).
ramen_drop_unrated.groupby('Style')['rating'].mean()
ramen_drop_unrated.groupby('Country')['rating'].mean().sort_values() | code |
72062410/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the ramen ratings dataset (one row per reviewed product).
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen['Stars'].value_counts() | code |
72062410/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Read the ramen ratings CSV into a DataFrame for filtering below.
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen[ramen['Country'] == 'Japan'] | code |
72062410/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Ramen ratings: drop the 'Unrated' rows, then summarise ratings by style,
# country, and brand review volume.
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen_drop_unrated = ramen.copy()
ramen_convert_unrated = ramen.copy()
is_rated = ramen_drop_unrated['Stars'] != 'Unrated'
ramen_drop_unrated = ramen_drop_unrated[is_rated]
# Mean rating per packaging style (notebook display expression).
ramen_drop_unrated.groupby('Style')['rating'].mean()
# Countries ordered from lowest to highest mean rating.
ramen_drop_unrated.groupby('Country')['rating'].mean().sort_values()
# The 25 most-reviewed brands.
ramen_drop_unrated.groupby('Brand')['rating'].count().sort_values(ascending=False)[:25]
ramen_drop_unrated.groupby('Brand').agg({'rating': ['mean', 'count']}).sort_values([('rating', 'mean')], ascending=False)[:25] | code |
72062410/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the ramen ratings and take two independent copies: one intended for
# dropping the 'Unrated' rows, one for converting them instead.
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen_drop_unrated = ramen.copy()
ramen_convert_unrated = ramen.copy()
ramen_convert_unrated.groupby('Style')['rating'].mean() | code |
72062410/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the ramen ratings dataset for a first look at the raw rows.
ramen = pd.read_csv('../input/ramen-ratings/ramen-ratings.csv')
ramen.head() | code |
72062410/cell_2 | [
"text_plain_output_1.png"
] | farbe = 'grün'
print(farbe) | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.