path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
104127064/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
df['Sex'].unique() | code |
104127064/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data
data.drop(['Age'], axis=1, inplace=True)
from sklearn.utils import shuffle
shuffle_data = shuffle(data, random_state=42)
shuffle_data
x = shuffle_data.drop('Survived', axis=1)
y = shuffle_data['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
lo = LogisticRegression()
lo.fit(x_train, y_train)
y_pred = lo.predict(x_test)
plt.scatter(y_test, y_pred) | code |
104127064/cell_31 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data
data.drop(['Age'], axis=1, inplace=True)
from sklearn.utils import shuffle
shuffle_data = shuffle(data, random_state=42)
shuffle_data
x = shuffle_data.drop('Survived', axis=1)
y = shuffle_data['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
(x_train.shape, x_test.shape, y_train.shape, y_test.shape) | code |
104127064/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data | code |
104127064/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum() | code |
104127064/cell_27 | [
"text_html_output_1.png"
] | from sklearn.utils import shuffle
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data
data.drop(['Age'], axis=1, inplace=True)
from sklearn.utils import shuffle
shuffle_data = shuffle(data, random_state=42)
shuffle_data | code |
104127064/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df
df.drop(['Pclass', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.isna().sum()
round(df.isna().sum() / len(df.index) * 100, 2)
round(df.isna().sum() / len(df.index) * 100, 2)
categrocal_col = df.select_dtypes(exclude=np.number)
categrocal_col
labels_ = ['child', 'young', 'teenage', 'adult', 'old']
bins_ = [0, 10, 18, 28, 45, 80]
df['Age'] = pd.cut(df['Age'], bins=bins_, labels=labels_)
age = pd.get_dummies(df['Age'])
data = pd.concat([df, age], axis=1)
data
data.drop(['Age'], axis=1, inplace=True)
from sklearn.utils import shuffle
shuffle_data = shuffle(data, random_state=42)
shuffle_data
x = shuffle_data.drop('Survived', axis=1)
y = shuffle_data['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
lo = LogisticRegression()
lo.fit(x_train, y_train)
y_pred = lo.predict(x_test)
from sklearn.metrics import confusion_matrix
cm = pd.DataFrame(confusion_matrix(y_test, y_pred), columns=['predicted yes', 'predicted no'], index=['actual yes', 'actual no'])
cm | code |
104127064/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df | code |
74050138/cell_13 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, BatchNormalization, Activation,Softmax
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import numpy as np
import os
import seaborn as sns
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def LoadImage(name, path):
    """Load one paired sample and split it into (photo, mask).

    Each file on disk is a single side-by-side image: the left 256
    columns are the RGB photo, the right columns are the colour-coded
    segmentation mask.
    """
    pixels = np.array(Image.open(os.path.join(path, name)))
    photo, seg_mask = pixels[:, :256], pixels[:, 256:]
    return (photo, seg_mask)
def bin_image(mask):
    """Quantize raw mask grey levels into integer class ids 0..12.

    Intensities are bucketed every 20 grey levels: values below 20 map
    to class 0, values of 240 and above map to class 12.
    """
    thresholds = np.arange(20, 241, 20)
    return np.digitize(mask, thresholds)
def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
    """One-hot encode a binned mask into a (height, width, classes) array.

    Only the first channel of `image` is consulted; output channel c is
    1.0 where that channel equals class id c and 0.0 elsewhere.
    """
    class_ids = image[:, :, 0]
    one_hot = np.zeros((height, width, classes))
    for class_id in range(classes):
        one_hot[:, :, class_id] = (class_ids == class_id).astype(int)
    return one_hot
def give_color_to_seg_img(seg, n_classes=N_CLASSES):
    """Map a 2-D class-id mask to an RGB float image.

    Each class id receives a distinct colour from seaborn's 'hls'
    palette; returns an (H, W, 3) float array.
    """
    palette = sns.color_palette('hls', n_classes)
    rgb = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
    for class_id in range(n_classes):
        hits = seg == class_id
        red, green, blue = palette[class_id]
        rgb[:, :, 0] += hits * red
        rgb[:, :, 1] += hits * green
        rgb[:, :, 2] += hits * blue
    return rgb
classes = 13
train_folder = '../input/cityscapes-image-pairs/cityscapes_data/train'
valid_folder = '../input/cityscapes-image-pairs/cityscapes_data/val'
num_of_training_samples = len(os.listdir(train_folder))
num_of_valid_samples = len(os.listdir(valid_folder))
def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
    """Endlessly yield (images, one-hot masks) batches read from `path`.

    Files are consumed in os.listdir order; the last batch of each pass
    over the directory may be smaller than `batch_size`. The outer
    `while True` makes the generator infinite, as keras' fit() expects
    when steps_per_epoch is supplied.
    """
    files = os.listdir(path)
    while True:
        for i in range(0, len(files), batch_size):
            batch_files = files[i:i + batch_size]
            imgs = []
            segs = []
            for file in batch_files:
                # Each file holds photo and mask side by side; bin the
                # mask's grey levels into class ids, then one-hot encode.
                image, mask = LoadImage(file, path)
                mask_binned = bin_image(mask)
                labels = getSegmentationArr(mask_binned, classes)
                imgs.append(image)
                segs.append(labels)
            yield (np.array(imgs), np.array(segs))
train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
def conv2d_block(input_tensor, n_filters, kernel_size=3):
    """Two stacked Conv2D -> BatchNorm -> ReLU stages.

    Parameters: `input_tensor` feeds the first convolution; both
    convolutions use `n_filters` square kernels of side `kernel_size`
    with 'same' padding. Returns the second activation's output tensor.
    """
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: the second Conv2D previously consumed `input_tensor`
    # again, silently discarding the first conv/BN/ReLU stack. Chain it
    # onto `x` so both stages actually run in sequence.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(n_filters=16):
    """Build a 5-level U-Net over (HEIGHT, WIDTH, 3) inputs.

    The encoder halves spatial resolution five times while doubling the
    filter count; the decoder mirrors it with transposed convolutions
    plus skip connections. Returns an uncompiled keras Model producing a
    per-pixel distribution over 13 classes.
    """
    inputs = Input((HEIGHT, WIDTH, 3))
    # --- encoder ---
    c1 = conv2d_block(inputs, n_filters * 1, kernel_size=3)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D((2, 2))(c4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3)
    p5 = MaxPooling2D((2, 2))(c5)
    # --- bottleneck ---
    c6 = conv2d_block(p5, n_filters=n_filters * 32, kernel_size=3)
    # --- decoder with skip connections to matching encoder stages ---
    u7 = Conv2DTranspose(n_filters * 16, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c5])
    c7 = conv2d_block(u7, n_filters * 16, kernel_size=3)
    u8 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c4])
    c8 = conv2d_block(u8, n_filters * 8, kernel_size=3)
    u9 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c3])
    c9 = conv2d_block(u9, n_filters * 4, kernel_size=3)
    u10 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c9)
    u10 = concatenate([u10, c2])
    c10 = conv2d_block(u10, n_filters * 2, kernel_size=3)
    u11 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c10)
    u11 = concatenate([u11, c1])
    c11 = conv2d_block(u11, n_filters * 1, kernel_size=3)
    # BUG FIX: the model is trained with categorical_crossentropy, which
    # expects a probability distribution over the class axis; the
    # original 'sigmoid' emitted independent per-class scores.
    outputs = Conv2D(13, (1, 1), activation='softmax')(c11)
    model = Model(inputs, outputs=[outputs])
    return model
checkpoint = ModelCheckpoint('unet_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model = get_unet()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
TRAIN_STEPS = num_of_training_samples // BATCH_SIZE + 1
VAL_STEPS = num_of_valid_samples // BATCH_SIZE + 1
results = model.fit(train_gen, validation_data=val_gen, steps_per_epoch=TRAIN_STEPS, validation_steps=VAL_STEPS, epochs=EPOCHS, callbacks=checkpoint) | code |
74050138/cell_6 | [
"image_output_2.png",
"image_output_1.png"
] | from PIL import Image
import numpy as np
import os
import seaborn as sns
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def LoadImage(name, path):
img = Image.open(os.path.join(path, name))
img = np.array(img)
image = img[:, :256]
mask = img[:, 256:]
return (image, mask)
def bin_image(mask):
bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])
new_mask = np.digitize(mask, bins)
return new_mask
def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
seg_labels = np.zeros((height, width, classes))
img = image[:, :, 0]
for c in range(classes):
seg_labels[:, :, c] = (img == c).astype(int)
return seg_labels
def give_color_to_seg_img(seg, n_classes=N_CLASSES):
seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
colors = sns.color_palette('hls', n_classes)
for c in range(n_classes):
segc = seg == c
seg_img[:, :, 0] += segc * colors[c][0]
seg_img[:, :, 1] += segc * colors[c][1]
seg_img[:, :, 2] += segc * colors[c][2]
return seg_img
classes = 13
train_folder = '../input/cityscapes-image-pairs/cityscapes_data/train'
valid_folder = '../input/cityscapes-image-pairs/cityscapes_data/val'
num_of_training_samples = len(os.listdir(train_folder))
num_of_valid_samples = len(os.listdir(valid_folder))
def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
files = os.listdir(path)
while True:
for i in range(0, len(files), batch_size):
batch_files = files[i:i + batch_size]
imgs = []
segs = []
for file in batch_files:
image, mask = LoadImage(file, path)
mask_binned = bin_image(mask)
labels = getSegmentationArr(mask_binned, classes)
imgs.append(image)
segs.append(labels)
yield (np.array(imgs), np.array(segs))
train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
imgs, segs = next(train_gen)
(imgs.shape, segs.shape) | code |
74050138/cell_11 | [
"text_plain_output_1.png"
] | from keras.layers import Input, BatchNormalization, Activation,Softmax
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def conv2d_block(input_tensor, n_filters, kernel_size=3):
    """Two stacked Conv2D -> BatchNorm -> ReLU stages.

    Parameters: `input_tensor` feeds the first convolution; both
    convolutions use `n_filters` square kernels of side `kernel_size`
    with 'same' padding. Returns the second activation's output tensor.
    """
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: the second Conv2D previously consumed `input_tensor`
    # again, silently discarding the first conv/BN/ReLU stack. Chain it
    # onto `x` so both stages actually run in sequence.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(n_filters=16):
    """Build a 5-level U-Net over (HEIGHT, WIDTH, 3) inputs.

    The encoder halves spatial resolution five times while doubling the
    filter count; the decoder mirrors it with transposed convolutions
    plus skip connections. Returns an uncompiled keras Model producing a
    per-pixel distribution over 13 classes.
    """
    inputs = Input((HEIGHT, WIDTH, 3))
    # --- encoder ---
    c1 = conv2d_block(inputs, n_filters * 1, kernel_size=3)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D((2, 2))(c4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3)
    p5 = MaxPooling2D((2, 2))(c5)
    # --- bottleneck ---
    c6 = conv2d_block(p5, n_filters=n_filters * 32, kernel_size=3)
    # --- decoder with skip connections to matching encoder stages ---
    u7 = Conv2DTranspose(n_filters * 16, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c5])
    c7 = conv2d_block(u7, n_filters * 16, kernel_size=3)
    u8 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c4])
    c8 = conv2d_block(u8, n_filters * 8, kernel_size=3)
    u9 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c3])
    c9 = conv2d_block(u9, n_filters * 4, kernel_size=3)
    u10 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c9)
    u10 = concatenate([u10, c2])
    c10 = conv2d_block(u10, n_filters * 2, kernel_size=3)
    u11 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c10)
    u11 = concatenate([u11, c1])
    c11 = conv2d_block(u11, n_filters * 1, kernel_size=3)
    # BUG FIX: the model is trained with categorical_crossentropy, which
    # expects a probability distribution over the class axis; the
    # original 'sigmoid' emitted independent per-class scores.
    outputs = Conv2D(13, (1, 1), activation='softmax')(c11)
    model = Model(inputs, outputs=[outputs])
    return model
model = get_unet()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary() | code |
74050138/cell_7 | [
"text_plain_output_1.png"
] | from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def LoadImage(name, path):
img = Image.open(os.path.join(path, name))
img = np.array(img)
image = img[:, :256]
mask = img[:, 256:]
return (image, mask)
def bin_image(mask):
bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])
new_mask = np.digitize(mask, bins)
return new_mask
def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
seg_labels = np.zeros((height, width, classes))
img = image[:, :, 0]
for c in range(classes):
seg_labels[:, :, c] = (img == c).astype(int)
return seg_labels
def give_color_to_seg_img(seg, n_classes=N_CLASSES):
seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
colors = sns.color_palette('hls', n_classes)
for c in range(n_classes):
segc = seg == c
seg_img[:, :, 0] += segc * colors[c][0]
seg_img[:, :, 1] += segc * colors[c][1]
seg_img[:, :, 2] += segc * colors[c][2]
return seg_img
classes = 13
train_folder = '../input/cityscapes-image-pairs/cityscapes_data/train'
valid_folder = '../input/cityscapes-image-pairs/cityscapes_data/val'
num_of_training_samples = len(os.listdir(train_folder))
num_of_valid_samples = len(os.listdir(valid_folder))
def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
files = os.listdir(path)
while True:
for i in range(0, len(files), batch_size):
batch_files = files[i:i + batch_size]
imgs = []
segs = []
for file in batch_files:
image, mask = LoadImage(file, path)
mask_binned = bin_image(mask)
labels = getSegmentationArr(mask_binned, classes)
imgs.append(image)
segs.append(labels)
yield (np.array(imgs), np.array(segs))
train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
imgs, segs = next(train_gen)
(imgs.shape, segs.shape)
image = imgs[2]
mask = give_color_to_seg_img(np.argmax(segs[2], axis=-1))
masked_image = cv2.addWeighted(image / 255, 0.5, mask, 0.5, 0)
fig, axs = plt.subplots(1, 3, figsize=(20, 20))
axs[0].imshow(image)
axs[0].set_title('Original Image')
axs[1].imshow(mask)
axs[1].set_title('Segmentation Mask')
axs[2].imshow(masked_image)
axs[2].set_title('Masked Image')
plt.show() | code |
74050138/cell_15 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, BatchNormalization, Activation,Softmax
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def LoadImage(name, path):
img = Image.open(os.path.join(path, name))
img = np.array(img)
image = img[:, :256]
mask = img[:, 256:]
return (image, mask)
def bin_image(mask):
bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])
new_mask = np.digitize(mask, bins)
return new_mask
def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
seg_labels = np.zeros((height, width, classes))
img = image[:, :, 0]
for c in range(classes):
seg_labels[:, :, c] = (img == c).astype(int)
return seg_labels
def give_color_to_seg_img(seg, n_classes=N_CLASSES):
seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
colors = sns.color_palette('hls', n_classes)
for c in range(n_classes):
segc = seg == c
seg_img[:, :, 0] += segc * colors[c][0]
seg_img[:, :, 1] += segc * colors[c][1]
seg_img[:, :, 2] += segc * colors[c][2]
return seg_img
classes = 13
train_folder = '../input/cityscapes-image-pairs/cityscapes_data/train'
valid_folder = '../input/cityscapes-image-pairs/cityscapes_data/val'
num_of_training_samples = len(os.listdir(train_folder))
num_of_valid_samples = len(os.listdir(valid_folder))
def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
files = os.listdir(path)
while True:
for i in range(0, len(files), batch_size):
batch_files = files[i:i + batch_size]
imgs = []
segs = []
for file in batch_files:
image, mask = LoadImage(file, path)
mask_binned = bin_image(mask)
labels = getSegmentationArr(mask_binned, classes)
imgs.append(image)
segs.append(labels)
yield (np.array(imgs), np.array(segs))
train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
imgs, segs = next(train_gen)
(imgs.shape, segs.shape)
image = imgs[2]
mask = give_color_to_seg_img(np.argmax(segs[2], axis=-1))
masked_image = cv2.addWeighted(image/255, 0.5, mask, 0.5, 0)
fig, axs = plt.subplots(1, 3, figsize=(20,20))
axs[0].imshow(image)
axs[0].set_title('Original Image')
axs[1].imshow(mask)
axs[1].set_title('Segmentation Mask')
axs[2].imshow(masked_image)
axs[2].set_title('Masked Image')
plt.show()
def conv2d_block(input_tensor, n_filters, kernel_size=3):
    """Two stacked Conv2D -> BatchNorm -> ReLU stages.

    Parameters: `input_tensor` feeds the first convolution; both
    convolutions use `n_filters` square kernels of side `kernel_size`
    with 'same' padding. Returns the second activation's output tensor.
    """
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: the second Conv2D previously consumed `input_tensor`
    # again, silently discarding the first conv/BN/ReLU stack. Chain it
    # onto `x` so both stages actually run in sequence.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(n_filters=16):
    """Build a 5-level U-Net over (HEIGHT, WIDTH, 3) inputs.

    The encoder halves spatial resolution five times while doubling the
    filter count; the decoder mirrors it with transposed convolutions
    plus skip connections. Returns an uncompiled keras Model producing a
    per-pixel distribution over 13 classes.
    """
    inputs = Input((HEIGHT, WIDTH, 3))
    # --- encoder ---
    c1 = conv2d_block(inputs, n_filters * 1, kernel_size=3)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D((2, 2))(c4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3)
    p5 = MaxPooling2D((2, 2))(c5)
    # --- bottleneck ---
    c6 = conv2d_block(p5, n_filters=n_filters * 32, kernel_size=3)
    # --- decoder with skip connections to matching encoder stages ---
    u7 = Conv2DTranspose(n_filters * 16, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c5])
    c7 = conv2d_block(u7, n_filters * 16, kernel_size=3)
    u8 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c4])
    c8 = conv2d_block(u8, n_filters * 8, kernel_size=3)
    u9 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c3])
    c9 = conv2d_block(u9, n_filters * 4, kernel_size=3)
    u10 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c9)
    u10 = concatenate([u10, c2])
    c10 = conv2d_block(u10, n_filters * 2, kernel_size=3)
    u11 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c10)
    u11 = concatenate([u11, c1])
    c11 = conv2d_block(u11, n_filters * 1, kernel_size=3)
    # BUG FIX: the model is trained with categorical_crossentropy, which
    # expects a probability distribution over the class axis; the
    # original 'sigmoid' emitted independent per-class scores.
    outputs = Conv2D(13, (1, 1), activation='softmax')(c11)
    model = Model(inputs, outputs=[outputs])
    return model
checkpoint = ModelCheckpoint('unet_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model = get_unet()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
TRAIN_STEPS = num_of_training_samples // BATCH_SIZE + 1
VAL_STEPS = num_of_valid_samples // BATCH_SIZE + 1
results = model.fit(train_gen, validation_data=val_gen, steps_per_epoch=TRAIN_STEPS, validation_steps=VAL_STEPS, epochs=EPOCHS, callbacks=checkpoint)
max_show = 2
imgs, segs = next(val_gen)
pred = model.predict(imgs)
for i in range(max_show):
_p = give_color_to_seg_img(np.argmax(pred[i], axis=-1))
_s = give_color_to_seg_img(np.argmax(segs[i], axis=-1))
predimg = cv2.addWeighted(imgs[i] / 255, 0.5, _p, 0.5, 0)
trueimg = cv2.addWeighted(imgs[i] / 255, 0.5, _s, 0.5, 0)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.title('Prediction')
plt.imshow(predimg)
plt.axis('off')
plt.subplot(122)
plt.title('Original')
plt.imshow(trueimg)
plt.axis('off')
plt.tight_layout()
plt.savefig('pred_' + str(i) + '.png', dpi=150)
plt.show() | code |
74050138/cell_14 | [
"image_output_1.png"
] | from PIL import Image
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, BatchNormalization, Activation,Softmax
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def LoadImage(name, path):
img = Image.open(os.path.join(path, name))
img = np.array(img)
image = img[:, :256]
mask = img[:, 256:]
return (image, mask)
def bin_image(mask):
bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])
new_mask = np.digitize(mask, bins)
return new_mask
def getSegmentationArr(image, classes, width=WIDTH, height=HEIGHT):
seg_labels = np.zeros((height, width, classes))
img = image[:, :, 0]
for c in range(classes):
seg_labels[:, :, c] = (img == c).astype(int)
return seg_labels
def give_color_to_seg_img(seg, n_classes=N_CLASSES):
seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
colors = sns.color_palette('hls', n_classes)
for c in range(n_classes):
segc = seg == c
seg_img[:, :, 0] += segc * colors[c][0]
seg_img[:, :, 1] += segc * colors[c][1]
seg_img[:, :, 2] += segc * colors[c][2]
return seg_img
classes = 13
train_folder = '../input/cityscapes-image-pairs/cityscapes_data/train'
valid_folder = '../input/cityscapes-image-pairs/cityscapes_data/val'
num_of_training_samples = len(os.listdir(train_folder))
num_of_valid_samples = len(os.listdir(valid_folder))
def DataGenerator(path, batch_size=BATCH_SIZE, classes=N_CLASSES):
files = os.listdir(path)
while True:
for i in range(0, len(files), batch_size):
batch_files = files[i:i + batch_size]
imgs = []
segs = []
for file in batch_files:
image, mask = LoadImage(file, path)
mask_binned = bin_image(mask)
labels = getSegmentationArr(mask_binned, classes)
imgs.append(image)
segs.append(labels)
yield (np.array(imgs), np.array(segs))
train_gen = DataGenerator(train_folder, batch_size=BATCH_SIZE)
val_gen = DataGenerator(valid_folder, batch_size=BATCH_SIZE)
imgs, segs = next(train_gen)
(imgs.shape, segs.shape)
image = imgs[2]
mask = give_color_to_seg_img(np.argmax(segs[2], axis=-1))
masked_image = cv2.addWeighted(image/255, 0.5, mask, 0.5, 0)
fig, axs = plt.subplots(1, 3, figsize=(20,20))
axs[0].imshow(image)
axs[0].set_title('Original Image')
axs[1].imshow(mask)
axs[1].set_title('Segmentation Mask')
axs[2].imshow(masked_image)
axs[2].set_title('Masked Image')
plt.show()
def conv2d_block(input_tensor, n_filters, kernel_size=3):
    """Two stacked Conv2D -> BatchNorm -> ReLU stages.

    Parameters: `input_tensor` feeds the first convolution; both
    convolutions use `n_filters` square kernels of side `kernel_size`
    with 'same' padding. Returns the second activation's output tensor.
    """
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: the second Conv2D previously consumed `input_tensor`
    # again, silently discarding the first conv/BN/ReLU stack. Chain it
    # onto `x` so both stages actually run in sequence.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(n_filters=16):
    """Build a 5-level U-Net over (HEIGHT, WIDTH, 3) inputs.

    The encoder halves spatial resolution five times while doubling the
    filter count; the decoder mirrors it with transposed convolutions
    plus skip connections. Returns an uncompiled keras Model producing a
    per-pixel distribution over 13 classes.
    """
    inputs = Input((HEIGHT, WIDTH, 3))
    # --- encoder ---
    c1 = conv2d_block(inputs, n_filters * 1, kernel_size=3)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D((2, 2))(c4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3)
    p5 = MaxPooling2D((2, 2))(c5)
    # --- bottleneck ---
    c6 = conv2d_block(p5, n_filters=n_filters * 32, kernel_size=3)
    # --- decoder with skip connections to matching encoder stages ---
    u7 = Conv2DTranspose(n_filters * 16, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c5])
    c7 = conv2d_block(u7, n_filters * 16, kernel_size=3)
    u8 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c4])
    c8 = conv2d_block(u8, n_filters * 8, kernel_size=3)
    u9 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c3])
    c9 = conv2d_block(u9, n_filters * 4, kernel_size=3)
    u10 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c9)
    u10 = concatenate([u10, c2])
    c10 = conv2d_block(u10, n_filters * 2, kernel_size=3)
    u11 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c10)
    u11 = concatenate([u11, c1])
    c11 = conv2d_block(u11, n_filters * 1, kernel_size=3)
    # BUG FIX: the model is trained with categorical_crossentropy, which
    # expects a probability distribution over the class axis; the
    # original 'sigmoid' emitted independent per-class scores.
    outputs = Conv2D(13, (1, 1), activation='softmax')(c11)
    model = Model(inputs, outputs=[outputs])
    return model
checkpoint = ModelCheckpoint('unet_model.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model = get_unet()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
TRAIN_STEPS = num_of_training_samples // BATCH_SIZE + 1
VAL_STEPS = num_of_valid_samples // BATCH_SIZE + 1
results = model.fit(train_gen, validation_data=val_gen, steps_per_epoch=TRAIN_STEPS, validation_steps=VAL_STEPS, epochs=EPOCHS, callbacks=checkpoint)
plt.figure(figsize=(8, 8))
plt.title('Learning curve')
plt.plot(results.history['loss'], label='loss')
plt.plot(results.history['val_loss'], label='val_loss')
plt.plot(np.argmin(results.history['val_loss']), np.min(results.history['val_loss']), marker='x', color='r', label='best model')
plt.xlabel('Epochs')
plt.ylabel('log_loss')
plt.legend() | code |
74050138/cell_12 | [
"image_output_1.png"
] | from keras.layers import Input, BatchNormalization, Activation,Softmax
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import tensorflow as tf
EPOCHS = 10
BATCH_SIZE = 17
HEIGHT = 256
WIDTH = 256
N_CLASSES = 13
def conv2d_block(input_tensor, n_filters, kernel_size=3):
    """Two stacked Conv2D -> BatchNorm -> ReLU stages.

    Parameters: `input_tensor` feeds the first convolution; both
    convolutions use `n_filters` square kernels of side `kernel_size`
    with 'same' padding. Returns the second activation's output tensor.
    """
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: the second Conv2D previously consumed `input_tensor`
    # again, silently discarding the first conv/BN/ReLU stack. Chain it
    # onto `x` so both stages actually run in sequence.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(n_filters=16):
    """Build a 5-level U-Net over (HEIGHT, WIDTH, 3) inputs.

    The encoder halves spatial resolution five times while doubling the
    filter count; the decoder mirrors it with transposed convolutions
    plus skip connections. Returns an uncompiled keras Model producing a
    per-pixel distribution over 13 classes.
    """
    inputs = Input((HEIGHT, WIDTH, 3))
    # --- encoder ---
    c1 = conv2d_block(inputs, n_filters * 1, kernel_size=3)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D((2, 2))(c4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3)
    p5 = MaxPooling2D((2, 2))(c5)
    # --- bottleneck ---
    c6 = conv2d_block(p5, n_filters=n_filters * 32, kernel_size=3)
    # --- decoder with skip connections to matching encoder stages ---
    u7 = Conv2DTranspose(n_filters * 16, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c5])
    c7 = conv2d_block(u7, n_filters * 16, kernel_size=3)
    u8 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c4])
    c8 = conv2d_block(u8, n_filters * 8, kernel_size=3)
    u9 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c3])
    c9 = conv2d_block(u9, n_filters * 4, kernel_size=3)
    u10 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c9)
    u10 = concatenate([u10, c2])
    c10 = conv2d_block(u10, n_filters * 2, kernel_size=3)
    u11 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c10)
    u11 = concatenate([u11, c1])
    c11 = conv2d_block(u11, n_filters * 1, kernel_size=3)
    # BUG FIX: the model is trained with categorical_crossentropy, which
    # expects a probability distribution over the class axis; the
    # original 'sigmoid' emitted independent per-class scores.
    outputs = Conv2D(13, (1, 1), activation='softmax')(c11)
    model = Model(inputs, outputs=[outputs])
    return model
model = get_unet()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
tf.keras.utils.plot_model(model=model, show_shapes=True, to_file='/kaggle/working/UNet Model.png') | code |
18153349/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data = pd.read_csv('../input/pokemon.csv')
data[np.logical_and(data['Speed'] > 145, data['Attack'] > 100)] | code |
18153349/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data.plot(kind='scatter', x='Speed', y='Attack', alpha=0.5, color='red')
plt.xlabel('Speed')
plt.ylabel('Attack')
plt.title('Speed Attack Scatter Plot') | code |
18153349/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head() | code |
18153349/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.head() | code |
18153349/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
dictionary = {'1': 'Istanbul', '2': 'Izmır', '3': 'Ankara', '4': 'London', '5': 'Boston'}
dictionary.clear()
data = pd.read_csv('../input/pokemon.csv')
lis = [1, 2, 3, 4, 5]
for i in lis:
print('i is :', i)
print('')
for index, value in enumerate(lis):
print(index, ':', value)
dictionary = {'Turkey': 'Ankara', 'England': 'Londra'}
for key, value in dictionary.items():
print(key, ':', value)
print('')
for index, value in data[['Speed']][0:1].iterrows():
print(index, ':', value) | code |
18153349/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data = pd.read_csv('../input/pokemon.csv')
x = data['Speed'] > 150
data[x] | code |
18153349/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns | code |
18153349/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data.head(2) | code |
18153349/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data = pd.read_csv('../input/pokemon.csv')
series = data['Speed']
print(type(series))
data_frame = data[['Speed']]
print(type(data_frame)) | code |
18153349/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
print(os.listdir('../input')) | code |
18153349/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes | code |
18153349/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.describe() | code |
18153349/cell_15 | [
"text_html_output_1.png"
] | dictionary = {'1': 'Istanbul', '2': 'Izmır', '3': 'Ankara', '4': 'London', '5': 'Boston'}
print(dictionary.keys())
print(dictionary.values()) | code |
18153349/cell_16 | [
"text_html_output_1.png"
] | dictionary = {'1': 'Istanbul', '2': 'Izmır', '3': 'Ankara', '4': 'London', '5': 'Boston'}
dictionary['1'] = 'Bursa'
dictionary['6'] = 'İstanbul'
print(dictionary)
del dictionary['3']
print(dictionary)
print('3' in dictionary) | code |
18153349/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.info() | code |
18153349/cell_17 | [
"image_output_1.png"
] | dictionary = {'1': 'Istanbul', '2': 'Izmır', '3': 'Ankara', '4': 'London', '5': 'Boston'}
dictionary.clear()
print(dictionary) | code |
18153349/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data.Attack.plot(kind='hist', bins=50, figsize=(12, 12))
plt.show() | code |
18153349/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data = pd.read_csv('../input/pokemon.csv')
data[(data['Speed'] > 145) & (data['Attack'] > 100)] | code |
18153349/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt='.1f', ax=ax)
plt.show() | code |
18153349/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # visualization tool
data = pd.read_csv('../input/pokemon.csv')
data.shape
data.columns
data.dtypes
data.sort_values(by='Attack', ascending=False).head()
#correlation map
f,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(data.corr(), annot=True, linewidths=1, fmt= '.1f',ax=ax)
plt.show()
data.Attack.plot(kind='line', color='blue', label='Attack', linewidth=1, alpha=0.5, grid=True, linestyle=':')
data.Speed.plot(kind='line', color='red', label='Speed', linewidth=1, alpha=0.5, grid=True, linestyle='-.') | code |
18153349/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/pokemon.csv')
data.shape | code |
89133083/cell_4 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
import os
import pandas as pd
def breaker(num: int=50, char: str='*') -> None:
pass
def preprocess(image: np.ndarray, size: int) -> np.ndarray:
return cv2.resize(src=cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB), dsize=(size, size), interpolation=cv2.INTER_AREA)
def get_images(path: str, names: np.ndarray, size: int) -> np.ndarray:
images = np.zeros((len(names), size, size, 3), dtype=np.uint8)
i = 0
for name in names:
images[i] = preprocess(cv2.imread(os.path.join(path, name + '.jpg'), cv2.IMREAD_COLOR), size)
i += 1
return images
def save(train_images: np.ndarray, test_images: np.ndarray, targets: np.ndarray, size: int) -> None:
np.save(f'./train_images_{size}.npy', train_images)
np.save(f'./test_images_{size}.npy', test_images)
np.save(f'./targets_{size}.npy', targets)
def get_statistics(images: np.ndarray, size: int) -> None:
breaker()
breaker()
breaker()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
ss_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/sample_submission.csv')
train_filenames = train_df['image_id'].copy().values
test_filenames = ss_df['image_id'].copy().values
targets = train_df.iloc[:, 1:].copy().values
size = 224
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size) | code |
89133083/cell_6 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
import os
import pandas as pd
def breaker(num: int=50, char: str='*') -> None:
pass
def preprocess(image: np.ndarray, size: int) -> np.ndarray:
return cv2.resize(src=cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB), dsize=(size, size), interpolation=cv2.INTER_AREA)
def get_images(path: str, names: np.ndarray, size: int) -> np.ndarray:
images = np.zeros((len(names), size, size, 3), dtype=np.uint8)
i = 0
for name in names:
images[i] = preprocess(cv2.imread(os.path.join(path, name + '.jpg'), cv2.IMREAD_COLOR), size)
i += 1
return images
def save(train_images: np.ndarray, test_images: np.ndarray, targets: np.ndarray, size: int) -> None:
np.save(f'./train_images_{size}.npy', train_images)
np.save(f'./test_images_{size}.npy', test_images)
np.save(f'./targets_{size}.npy', targets)
def get_statistics(images: np.ndarray, size: int) -> None:
breaker()
breaker()
breaker()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
ss_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/sample_submission.csv')
train_filenames = train_df['image_id'].copy().values
test_filenames = ss_df['image_id'].copy().values
targets = train_df.iloc[:, 1:].copy().values
size = 224
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 320
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 384
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size) | code |
89133083/cell_7 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
import os
import pandas as pd
def breaker(num: int=50, char: str='*') -> None:
pass
def preprocess(image: np.ndarray, size: int) -> np.ndarray:
return cv2.resize(src=cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB), dsize=(size, size), interpolation=cv2.INTER_AREA)
def get_images(path: str, names: np.ndarray, size: int) -> np.ndarray:
images = np.zeros((len(names), size, size, 3), dtype=np.uint8)
i = 0
for name in names:
images[i] = preprocess(cv2.imread(os.path.join(path, name + '.jpg'), cv2.IMREAD_COLOR), size)
i += 1
return images
def save(train_images: np.ndarray, test_images: np.ndarray, targets: np.ndarray, size: int) -> None:
np.save(f'./train_images_{size}.npy', train_images)
np.save(f'./test_images_{size}.npy', test_images)
np.save(f'./targets_{size}.npy', targets)
def get_statistics(images: np.ndarray, size: int) -> None:
breaker()
breaker()
breaker()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
ss_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/sample_submission.csv')
train_filenames = train_df['image_id'].copy().values
test_filenames = ss_df['image_id'].copy().values
targets = train_df.iloc[:, 1:].copy().values
size = 224
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 320
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 384
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 512
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size) | code |
89133083/cell_5 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
import os
import pandas as pd
def breaker(num: int=50, char: str='*') -> None:
pass
def preprocess(image: np.ndarray, size: int) -> np.ndarray:
return cv2.resize(src=cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB), dsize=(size, size), interpolation=cv2.INTER_AREA)
def get_images(path: str, names: np.ndarray, size: int) -> np.ndarray:
images = np.zeros((len(names), size, size, 3), dtype=np.uint8)
i = 0
for name in names:
images[i] = preprocess(cv2.imread(os.path.join(path, name + '.jpg'), cv2.IMREAD_COLOR), size)
i += 1
return images
def save(train_images: np.ndarray, test_images: np.ndarray, targets: np.ndarray, size: int) -> None:
np.save(f'./train_images_{size}.npy', train_images)
np.save(f'./test_images_{size}.npy', test_images)
np.save(f'./targets_{size}.npy', targets)
def get_statistics(images: np.ndarray, size: int) -> None:
breaker()
breaker()
breaker()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
ss_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/sample_submission.csv')
train_filenames = train_df['image_id'].copy().values
test_filenames = ss_df['image_id'].copy().values
targets = train_df.iloc[:, 1:].copy().values
size = 224
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size)
size = 320
train_images = get_images('../input/plant-pathology-2020-fgvc7/images', train_filenames, size)
test_images = get_images('../input/plant-pathology-2020-fgvc7/images', test_filenames, size)
save(train_images, test_images, targets, size)
get_statistics(train_images, size) | code |
106200991/cell_7 | [
"image_output_1.png"
] | from glob import glob
import matplotlib.pylab as plt
import pandas as pd
train_img = glob('../input/kaggle-pog-series-s01e03/corn/train/*.png')
test_img = glob('../input/kaggle-pog-series-s01e03/corn/test/*.png')
train_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/train.csv')
test_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/test.csv')
def get_index(view,label):
return train_df[(train_df.view==view) & (train_df.label==label)].index[0]
def plots(label):
# top
img_mpl1 = plt.imread(train_img[get_index('top',label)])
ax = pd.Series(img_mpl1.flatten()).rename(f'top {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
ax.set_title(f'{label.capitalize()} Corn',pad=40)
ax.title.set_size(28)
fig = ax.get_figure()
fig.tight_layout()
# bottom broken
img_mpl2 = plt.imread(train_img[get_index('bottom',label)])
pd.Series(img_mpl2.flatten()).rename(f'bottom {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
# pictures
fig, axs= plt.subplots(1,2,figsize=(8,8))
axs[0].imshow(img_mpl1)
axs[1].imshow(img_mpl2)
axs[0].set_title('Top')
axs[0].title.set_size(20)
axs[1].set_title('Bottom')
axs[1].title.set_size(20)
plt.show()
plots('broken')
plots('pure')
plots('discolored')
plots('silkcut') | code |
106200991/cell_8 | [
"image_output_1.png"
] | from glob import glob
import matplotlib.pylab as plt
import pandas as pd
train_img = glob('../input/kaggle-pog-series-s01e03/corn/train/*.png')
test_img = glob('../input/kaggle-pog-series-s01e03/corn/test/*.png')
train_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/train.csv')
test_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/test.csv')
def get_index(view,label):
return train_df[(train_df.view==view) & (train_df.label==label)].index[0]
def plots(label):
# top
img_mpl1 = plt.imread(train_img[get_index('top',label)])
ax = pd.Series(img_mpl1.flatten()).rename(f'top {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
ax.set_title(f'{label.capitalize()} Corn',pad=40)
ax.title.set_size(28)
fig = ax.get_figure()
fig.tight_layout()
# bottom broken
img_mpl2 = plt.imread(train_img[get_index('bottom',label)])
pd.Series(img_mpl2.flatten()).rename(f'bottom {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
# pictures
fig, axs= plt.subplots(1,2,figsize=(8,8))
axs[0].imshow(img_mpl1)
axs[1].imshow(img_mpl2)
axs[0].set_title('Top')
axs[0].title.set_size(20)
axs[1].set_title('Bottom')
axs[1].title.set_size(20)
plt.show()
ax = train_df.groupby(['label'])['image'].count().plot(kind='pie', figsize=(10, 10), title='Corn Type Ratio', autopct='%1.1f%%', shadow=True, fontsize=15)
ax.title.set_size(25) | code |
106200991/cell_10 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
import matplotlib.pylab as plt
import pandas as pd
train_img = glob('../input/kaggle-pog-series-s01e03/corn/train/*.png')
test_img = glob('../input/kaggle-pog-series-s01e03/corn/test/*.png')
train_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/train.csv')
test_df = pd.read_csv('../input/kaggle-pog-series-s01e03/corn/test.csv')
def get_index(view,label):
return train_df[(train_df.view==view) & (train_df.label==label)].index[0]
def plots(label):
# top
img_mpl1 = plt.imread(train_img[get_index('top',label)])
ax = pd.Series(img_mpl1.flatten()).rename(f'top {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
ax.set_title(f'{label.capitalize()} Corn',pad=40)
ax.title.set_size(28)
fig = ax.get_figure()
fig.tight_layout()
# bottom broken
img_mpl2 = plt.imread(train_img[get_index('bottom',label)])
pd.Series(img_mpl2.flatten()).rename(f'bottom {label.capitalize()}').plot(kind='hist',bins=50,legend=True)
# pictures
fig, axs= plt.subplots(1,2,figsize=(8,8))
axs[0].imshow(img_mpl1)
axs[1].imshow(img_mpl2)
axs[0].set_title('Top')
axs[0].title.set_size(20)
axs[1].set_title('Bottom')
axs[1].title.set_size(20)
plt.show()
ax = train_df.groupby(['label'])['image'].count().plot(kind='pie',figsize=(10,10),title='Corn Type Ratio',autopct='%1.1f%%',shadow=True,fontsize=15);
ax.title.set_size(25)
ax = train_df.groupby(['label', 'view'])['image'].count().plot(kind='pie', figsize=(10, 10), title='Corn Type and View Ratio', autopct='%1.1f%%', shadow=True, fontsize=15)
ax.title.set_size(25) | code |
74045159/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
plt.figure(figsize=(12, 10))
sns.heatmap(cor) | code |
74045159/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
train_df['date'] = train_df['date'].str.replace('T000000', '')
train_df | code |
74045159/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df
X_test = slct_test_df.to_numpy()
X_test | code |
74045159/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df
X = slct_train_df.to_numpy()
X
y = train_df['price'].to_numpy()
log1p_y = np.log1p(y)
log1p_y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X, log1p_y)
X_test = slct_test_df.to_numpy()
X_test
log1p_p = model.predict(X_test)
log1p_p
p = np.expm1(log1p_p)
p | code |
74045159/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
test_df['date'] = test_df['date'].str.replace('T000000', '')
test_df | code |
74045159/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df
for c in slct_test_df.columns:
print(c, slct_test_df[c].isna().sum()) | code |
74045159/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74045159/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
for c in slct_train_df.columns:
print(c, slct_train_df[c].isna().sum()) | code |
74045159/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
X = slct_train_df.to_numpy()
X
y = train_df['price'].to_numpy()
log1p_y = np.log1p(y)
log1p_y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X, log1p_y)
print('result:{}'.format(model.score(X, log1p_y))) | code |
74045159/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df | code |
74045159/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df | code |
74045159/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df | code |
74045159/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
submit_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/sample_submission.csv', index_col=0)
submit_df | code |
74045159/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df
X = slct_train_df.to_numpy()
X
y = train_df['price'].to_numpy()
log1p_y = np.log1p(y)
log1p_y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X, log1p_y)
X_test = slct_test_df.to_numpy()
X_test
log1p_p = model.predict(X_test)
log1p_p | code |
74045159/cell_24 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
y = train_df['price'].to_numpy()
log1p_y = np.log1p(y)
log1p_y | code |
74045159/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
X = slct_train_df.to_numpy()
X | code |
74045159/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df
X = slct_train_df.to_numpy()
X
y = train_df['price'].to_numpy()
log1p_y = np.log1p(y)
log1p_y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X, log1p_y)
X_test = slct_test_df.to_numpy()
X_test
log1p_p = model.predict(X_test)
log1p_p
p = np.expm1(log1p_p)
p
submit_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/sample_submission.csv', index_col=0)
submit_df
submit_df['price'] = p
submit_df | code |
74045159/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df | code |
88075597/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
df_mod.dropna(inplace=True)
df_mod.Complain.value_counts()
df_mod.drop(columns=['Complain'], inplace=True)
Discount_matrix = df_mod[sorted([i for i in df_mod.columns if i.startswith('Acc')])]
l = {}
k = []
for i in range(len(Discount_matrix)):
for j in Discount_matrix.columns:
if Discount_matrix.iloc[i][j] != 0:
l[i] = int(j[-1])
break
else:
l[i] = 0
df_mod['No_first_accepted'] = l.values()
df_mod['total_accepted'] = np.sum(Discount_matrix, axis=1)
df_mod['accepted_any'] = np.where(df_mod['total_accepted'] != 0, 1, 0)
df_mod.loc[np.isin(df_mod['Marital_Status'], ['YOLO', 'Absurd', 'Alone']), 'Marital_Status'] = 'Single'
df_mod['Marital_Status'].value_counts()
fig, axs = plt.subplots(1,3, figsize=(12,6))
fig.patch.set_facecolor('white')
sns.histplot(df_mod, x="No_first_accepted", ax=axs[0])
sns.histplot(df_mod, x="accepted_any", ax=axs[1])
sns.histplot(df_mod, x="total_accepted", ax=axs[2])
fig, axs = plt.subplots(1, 3, figsize=(18, 6))
sns.set_theme(style='whitegrid')
for i, j in enumerate(['Marital_Status', 'Education', 'Kidhome']):
sns.barplot(x=j, y='accepted_any', data=df_mod, ax=axs[i], capsize=0.2)
fig.patch.set_facecolor('white') | code |
88075597/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
df_mod.dropna(inplace=True)
df_mod.Complain.value_counts() | code |
88075597/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Load and clean the marketing-campaign data, then explore which customers
# accepted promotional campaigns.
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
df_mod.dropna(inplace=True)
df_mod.Complain.value_counts()
df_mod.drop(columns=['Complain'], inplace=True)
# One 0/1 column per campaign, sorted so column order matches campaign number.
Discount_matrix = df_mod[sorted([i for i in df_mod.columns if i.startswith('Acc')])]
l = {}
k = []
# For each row, record the number of the FIRST campaign that was accepted
# (last character of the column name), or 0 if none was accepted.
for i in range(len(Discount_matrix)):
    for j in Discount_matrix.columns:
        if Discount_matrix.iloc[i][j] != 0:
            l[i] = int(j[-1])
            break
        else:
            l[i] = 0
df_mod['No_first_accepted'] = l.values()
df_mod['total_accepted'] = np.sum(Discount_matrix, axis=1)
df_mod['accepted_any'] = np.where(df_mod['total_accepted'] != 0, 1, 0)
# Fold rare/joke categories into 'Single'.
df_mod.loc[np.isin(df_mod['Marital_Status'], ['YOLO', 'Absurd', 'Alone']), 'Marital_Status'] = 'Single'
df_mod['Marital_Status'].value_counts()
# Distributions of the derived acceptance features.
fig, axs = plt.subplots(1,3, figsize=(12,6))
fig.patch.set_facecolor('white')
sns.histplot(df_mod, x="No_first_accepted", ax=axs[0])
sns.histplot(df_mod, x="accepted_any", ax=axs[1])
sns.histplot(df_mod, x="total_accepted", ax=axs[2])
# Acceptance rate broken down by demographic categories.
fig, axs = plt.subplots(1,3, figsize=(18,6))
sns.set_theme(style="whitegrid")
for i,j in enumerate(["Marital_Status", "Education", "Kidhome"]):
    sns.barplot(x=j, y="accepted_any", data=df_mod, ax=axs[i], capsize=.2)
fig.patch.set_facecolor('white')
# Spending (Mnt*) vs. Recency/Income scatter plots with correlation titles;
# income outliers above 200k are excluded from the plot only.
fig, axs = plt.subplots(2, 6, figsize=(24, 12))
fig.patch.set_facecolor('white')
subs = df_mod.loc[df_mod.Income < 200000]
for j, i in enumerate([i for i in df_mod.columns if i.startswith('Mnt')]):
    for k in range(2):
        cols = ['Recency', 'Income']
        sns.scatterplot(x=cols[k], y=i, data=subs, ax=axs[k][j])
        spear_corr = stats.spearmanr(df_mod[cols[k]], df_mod[i])
        pear_cor = stats.pearsonr(df_mod[cols[k]], df_mod[i])
        axs[k][j].set_title('corr: spearm.:{} and pears.:{}'.format(round(spear_corr[0], 3), round(pear_cor[0], 3))) | code
88075597/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Same preprocessing pipeline as the other cells, ending with histograms of
# the derived campaign-acceptance features.
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
df_mod.dropna(inplace=True)
df_mod.Complain.value_counts()
df_mod.drop(columns=['Complain'], inplace=True)
Discount_matrix = df_mod[sorted([i for i in df_mod.columns if i.startswith('Acc')])]
l = {}
k = []
# First accepted campaign number per row (0 when none accepted).
for i in range(len(Discount_matrix)):
    for j in Discount_matrix.columns:
        if Discount_matrix.iloc[i][j] != 0:
            l[i] = int(j[-1])
            break
        else:
            l[i] = 0
df_mod['No_first_accepted'] = l.values()
df_mod['total_accepted'] = np.sum(Discount_matrix, axis=1)
df_mod['accepted_any'] = np.where(df_mod['total_accepted'] != 0, 1, 0)
df_mod.loc[np.isin(df_mod['Marital_Status'], ['YOLO', 'Absurd', 'Alone']), 'Marital_Status'] = 'Single'
df_mod['Marital_Status'].value_counts()
fig, axs = plt.subplots(1, 3, figsize=(12, 6))
fig.patch.set_facecolor('white')
sns.histplot(df_mod, x='No_first_accepted', ax=axs[0])
sns.histplot(df_mod, x='accepted_any', ax=axs[1])
sns.histplot(df_mod, x='total_accepted', ax=axs[2]) | code
88075597/cell_7 | [
"image_output_1.png"
import pandas as pd

# Load the marketing-campaign dataset (tab-separated) and report which
# columns contain missing values.
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
# Customer age relative to the most recent enrolment year in the data.
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
# FIX: count NaNs once per column with isna().sum() instead of materialising
# a boolean-filtered copy of the whole frame twice per column.  The printed
# output (column name + missing count) is unchanged.
for i in df_mod.columns:
    n_missing = df_mod[i].isna().sum()
    if n_missing != 0:
        print(i, n_missing)
88075597/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
# Preprocess the campaign data and derive per-customer acceptance features.
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact'])
df_mod.dropna(inplace=True)
df_mod.Complain.value_counts()
df_mod.drop(columns=['Complain'], inplace=True)
# 0/1 acceptance flags for each campaign, columns sorted by campaign number.
Discount_matrix = df_mod[sorted([i for i in df_mod.columns if i.startswith('Acc')])]
l = {}
k = []
# For each row: first campaign number accepted (taken from the last character
# of the column name), or 0 if the customer accepted none.
for i in range(len(Discount_matrix)):
    for j in Discount_matrix.columns:
        if Discount_matrix.iloc[i][j] != 0:
            l[i] = int(j[-1])
            break
        else:
            l[i] = 0
df_mod['No_first_accepted'] = l.values()
df_mod['total_accepted'] = np.sum(Discount_matrix, axis=1)
df_mod['accepted_any'] = np.where(df_mod['total_accepted'] != 0, 1, 0)
# Fold rare/joke categories into 'Single'.
df_mod.loc[np.isin(df_mod['Marital_Status'], ['YOLO', 'Absurd', 'Alone']), 'Marital_Status'] = 'Single'
df_mod['Marital_Status'].value_counts() | code
104117681/cell_4 | [
"text_plain_output_1.png"
# Scratch cell: inspect the math module's built-in constants.
import math

_ = math.e   # Euler's number, ~2.71828
_ = math.pi  # ~3.14159
104117681/cell_6 | [
"text_plain_output_1.png"
# Scratch cell: math constants plus ceil() applied to pi.
import math

_ = math.e
_ = math.pi
a = math.pi       # value for the rounding demo
_ = math.ceil(a)  # pi rounds up to 4
104117681/cell_11 | [
"text_plain_output_1.png"
# Scratch cell: rounding helpers, exp() and the natural logarithm.
import math

_ = math.e
_ = math.pi
a = math.pi        # value for the rounding demos
_ = math.ceil(a)   # 4
_ = math.floor(a)  # 3
_ = math.trunc(a)  # 3 (truncates toward zero)
x = 7
_ = math.exp(x)    # e ** 7
_ = math.log(1000) # natural log
104117681/cell_7 | [
"text_plain_output_1.png"
# Scratch cell: constants plus ceil()/floor() of pi.
import math

_ = math.e
_ = math.pi
a = math.pi        # value for the rounding demos
_ = math.ceil(a)   # 4
_ = math.floor(a)  # 3
104117681/cell_18 | [
"text_plain_output_1.png"
# Scratch cell: tour of common math-module functions, ending with an
# accurate float sum via fsum().
import math

_ = math.e
_ = math.pi
a = math.pi                         # rounding demos
_ = math.ceil(a)                    # 4
_ = math.floor(a)                   # 3
_ = math.trunc(a)                   # 3
x = 7
_ = math.exp(x)                     # e ** 7
_ = math.log(1000)                  # natural log
_ = math.log(1000, 10)              # log base 10 -> ~3
_ = math.sin(math.pi / 2)           # 1.0
degree = 90
_ = math.sin(math.radians(degree))  # sin of 90 degrees
_ = math.sqrt(5)
_ = math.factorial(5)               # 120
l = [1.2, 2.3, 3.4, 4.5]
_ = math.fsum(l)                    # loss-free float summation
104117681/cell_8 | [
"text_plain_output_1.png"
# Scratch cell: constants plus the three rounding helpers on pi.
import math

_ = math.e
_ = math.pi
a = math.pi
_ = math.ceil(a)   # 4
_ = math.floor(a)  # 3
_ = math.trunc(a)  # 3 (truncates toward zero)
104117681/cell_16 | [
"text_plain_output_1.png"
# Scratch cell: math functions up to sqrt().
import math

_ = math.e
_ = math.pi
a = math.pi                         # rounding demos
_ = math.ceil(a)                    # 4
_ = math.floor(a)                   # 3
_ = math.trunc(a)                   # 3
x = 7
_ = math.exp(x)                     # e ** 7
_ = math.log(1000)                  # natural log
_ = math.log(1000, 10)              # log base 10
_ = math.sin(math.pi / 2)           # 1.0
degree = 90
_ = math.sin(math.radians(degree))  # sin of 90 degrees
_ = math.sqrt(5)
104117681/cell_3 | [
"text_plain_output_1.png"
# Scratch cell: Euler's number from the math module.
import math

_ = math.e  # ~2.71828
104117681/cell_17 | [
"text_plain_output_1.png"
# Scratch cell: math functions up to factorial().
import math

_ = math.e
_ = math.pi
a = math.pi                         # rounding demos
_ = math.ceil(a)                    # 4
_ = math.floor(a)                   # 3
_ = math.trunc(a)                   # 3
x = 7
_ = math.exp(x)                     # e ** 7
_ = math.log(1000)                  # natural log
_ = math.log(1000, 10)              # log base 10
_ = math.sin(math.pi / 2)           # 1.0
degree = 90
_ = math.sin(math.radians(degree))  # sin of 90 degrees
_ = math.sqrt(5)
_ = math.factorial(5)               # 120
104117681/cell_14 | [
"text_plain_output_1.png"
# Scratch cell: math functions up to trigonometry with degrees.
import math

_ = math.e
_ = math.pi
a = math.pi                         # rounding demos
_ = math.ceil(a)                    # 4
_ = math.floor(a)                   # 3
_ = math.trunc(a)                   # 3
x = 7
_ = math.exp(x)                     # e ** 7
_ = math.log(1000)                  # natural log
_ = math.log(1000, 10)              # log base 10
_ = math.sin(math.pi / 2)           # 1.0
degree = 90
_ = math.sin(math.radians(degree))  # radians() converts degrees first
104117681/cell_10 | [
"text_plain_output_1.png"
# Scratch cell: constants, rounding helpers, and exp().
import math

_ = math.e
_ = math.pi
a = math.pi        # rounding demos
_ = math.ceil(a)   # 4
_ = math.floor(a)  # 3
_ = math.trunc(a)  # 3
x = 7
_ = math.exp(x)    # e ** 7
104117681/cell_12 | [
"text_plain_output_1.png"
# Scratch cell: math functions up to logarithms in two bases.
import math

_ = math.e
_ = math.pi
a = math.pi            # rounding demos
_ = math.ceil(a)       # 4
_ = math.floor(a)      # 3
_ = math.trunc(a)      # 3
x = 7
_ = math.exp(x)        # e ** 7
_ = math.log(1000)     # natural log
_ = math.log(1000, 10) # log base 10 -> ~3
130013718/cell_21 | [
"text_html_output_1.png"
from PIL import Image
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

def _add_labels(frame, prefix):
    """Derive the class label from the first directory component after *prefix*."""
    frame['label'] = frame['filename']
    frame['label'] = frame['label'].str.replace(prefix, '')
    frame['label'] = frame['label'].str.split('/').str[0]
    return frame

def _predict_classes(paths):
    """Classify each image path with the loaded SavedModel; return class names."""
    preds = []
    for path in paths:
        img = Image.open(path).convert('RGB')
        # Resize to width 300 while keeping the aspect ratio.
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its long-standing name.
        img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
        inp = tf.constant(np.array(img)[None], dtype='float32')
        class_scores = model(inp)[0].numpy()
        preds.append(classes[class_scores.argmax()])
    return preds

# Build labelled train/test frames from the directory layout.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/train/')
Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/test/')

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

# Predict the test split, then the train split (mirrors the original cell,
# which recomputed `result` for each split in turn).
result = _predict_classes(sol_test.filename)
result = _predict_classes(sol_train.filename)

# Finally, predict the unlabeled solution-test images and attach the results.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/soltest')
soltest = pd.DataFrame().assign(filename=Id)
result = _predict_classes(soltest.filename)
soltest = soltest.assign(prediction=result)
soltest.head()
130013718/cell_13 | [
"text_plain_output_1.png"
from PIL import Image
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

def _add_labels(frame, prefix):
    """Derive the class label from the first directory component after *prefix*."""
    frame['label'] = frame['filename']
    frame['label'] = frame['label'].str.replace(prefix, '')
    frame['label'] = frame['label'].str.split('/').str[0]
    return frame

def _predict_classes(paths):
    """Classify each image path with the loaded SavedModel; return class names."""
    preds = []
    for path in paths:
        img = Image.open(path).convert('RGB')
        # Resize to width 300 while keeping the aspect ratio.
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its long-standing name.
        img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
        inp = tf.constant(np.array(img)[None], dtype='float32')
        class_scores = model(inp)[0].numpy()
        preds.append(classes[class_scores.argmax()])
    return preds

# Labelled train/test frames derived from the directory layout.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/train/')
Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/test/')

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

# Predict the test split first (as in the original cell), then the train
# split, whose predictions are attached to sol_train.
result = _predict_classes(sol_test.filename)
result = _predict_classes(sol_train.filename)
sol_train = sol_train.assign(prediction=result)
sol_train.head()
130013718/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
import tensorflow as tf
import numpy as np
from PIL import Image
# Load the pretrained EfficientNet SavedModel from the attached dataset.
model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# Class names indexed by predicted class id.
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy'] | code
130013718/cell_4 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Collect all training-image paths and derive each image's class label from
# its directory name under .../train/<label>/<file>.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
sol_train['label'] = sol_train['filename']
sol_train['label'] = sol_train['label'].str.replace('/kaggle/input/cassava-disease-classification/train/', '')
# Keep only the first path component, i.e. the class directory name.
sol_train['label'] = sol_train['label'].str.split('/').str[0]
sol_train.head() | code
130013718/cell_23 | [
"text_plain_output_1.png"
] | import os
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Gather image paths for the train, test and soltest splits, then load the
# sample-submission file and build full paths for its image ids.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/soltest'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
soltest = pd.DataFrame()
soltest = soltest.assign(filename=Id)
# Map submission ids to image paths under the 'sol' directory.
sol = pd.read_csv('/kaggle/input/cassava-disease-classification/submission.csv')
sol['filename'] = '/kaggle/input/cassava-disease-classification/sol/' + sol['Id']
sol.head() | code
130013718/cell_20 | [
"text_html_output_1.png"
from PIL import Image
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

def _add_labels(frame, prefix):
    """Derive the class label from the first directory component after *prefix*."""
    frame['label'] = frame['filename']
    frame['label'] = frame['label'].str.replace(prefix, '')
    frame['label'] = frame['label'].str.split('/').str[0]
    return frame

def _predict_classes(paths):
    """Classify each image path with the loaded SavedModel; return class names."""
    preds = []
    for path in paths:
        img = Image.open(path).convert('RGB')
        # Resize to width 300 while keeping the aspect ratio.
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its long-standing name.
        img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
        inp = tf.constant(np.array(img)[None], dtype='float32')
        class_scores = model(inp)[0].numpy()
        preds.append(classes[class_scores.argmax()])
    return preds

# Labelled train/test frames derived from the directory layout.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/train/')
Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/test/')

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

# Predict test split, then train split, then the unlabeled soltest split
# (mirrors the original cell, which reused `result` for each pass).
result = _predict_classes(sol_test.filename)
result = _predict_classes(sol_train.filename)

Id = _collect_paths('/kaggle/input/cassava-disease-classification/soltest')
soltest = pd.DataFrame().assign(filename=Id)
result = _predict_classes(soltest.filename)
result[:5]
130013718/cell_6 | [
"text_html_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build filename frames for the train and test splits (no labels here).
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test.head() | code
130013718/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build a frame of all training-image paths.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
sol_train.head() | code
130013718/cell_11 | [
"text_html_output_1.png"
from PIL import Image
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

# Train frame holds paths only; the test frame also gets a class label taken
# from the first directory component under .../test/.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = pd.DataFrame().assign(filename=Id)

Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = pd.DataFrame().assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test['label'] = sol_test['label'].str.split('/').str[0]

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

result = []
for i in sol_test.filename:
    img = Image.open(i).convert('RGB')
    # Resize to width 300 while keeping the aspect ratio.
    # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter under its long-standing name.
    img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
    inp_numpy = np.array(img)[None]
    inp = tf.constant(inp_numpy, dtype='float32')
    class_scores = model(inp)[0].numpy()
    result.append(classes[class_scores.argmax()])
result[:5]
sol_test = sol_test.assign(prediction=result)
sol_test.head()
130013718/cell_19 | [
"text_html_output_1.png"
] | import os
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build filename frames for the train, test and soltest splits.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/soltest'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
soltest = pd.DataFrame()
soltest = soltest.assign(filename=Id)
soltest.head() | code
130013718/cell_1 | [
"text_plain_output_1.png"
# Notebook scratch cell: gather every training-image path under the cassava
# dataset directory (the list stays empty when the dataset is not mounted).
import os
import numpy as np
import pandas as pd

Id = [
    os.path.join(dirname, filename)
    for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train')
    for filename in filenames
]
Id[:5]
130013718/cell_7 | [
"text_html_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build train/test filename frames; the test frame additionally gets a label
# column with the dataset prefix stripped (not yet split to the class name).
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test.head() | code
130013718/cell_18 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import os
import os
# Walk the train, test and soltest directories in turn; Id ends up holding
# only the soltest paths because it is reset before each walk.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/soltest'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5] | code
130013718/cell_8 | [
"text_html_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build train/test filename frames; the test frame gets a class label taken
# from the first directory component under .../test/.
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test['label'] = sol_test['label'].str.split('/').str[0]
sol_test.head() | code
130013718/cell_15 | [
"text_html_output_1.png"
from PIL import Image
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

def _add_labels(frame, prefix):
    """Derive the class label from the first directory component after *prefix*."""
    frame['label'] = frame['filename']
    frame['label'] = frame['label'].str.replace(prefix, '')
    frame['label'] = frame['label'].str.split('/').str[0]
    return frame

def _predict_classes(paths):
    """Classify each image path with the loaded SavedModel; return class names."""
    preds = []
    for path in paths:
        img = Image.open(path).convert('RGB')
        # Resize to width 300 while keeping the aspect ratio.
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its long-standing name.
        img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
        inp = tf.constant(np.array(img)[None], dtype='float32')
        class_scores = model(inp)[0].numpy()
        preds.append(classes[class_scores.argmax()])
    return preds

# Labelled train/test frames derived from the directory layout.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/train/')
Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/test/')

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

# Predict the test split first (as the original cell did), then the train
# split; attach the train predictions and expand the short directory labels
# to the full class names so they are comparable to the predictions.
result = _predict_classes(sol_test.filename)
result = _predict_classes(sol_train.filename)
sol_train = sol_train.assign(prediction=result)
sol_train['label'] = sol_train['label'].replace({'cbb': 'Cassava Bacterial Blight (CBB)', 'cbsd': 'Cassava Brown Streak Disease (CBSD)', 'cgm': 'Cassava Green Mottle (CGM)', 'cmd': 'Cassava Mosaic Disease (CMD)', 'healthy': 'Healthy'})
sol_train.head()
130013718/cell_16 | [
"text_html_output_1.png"
from PIL import Image
from sklearn.metrics import classification_report
import numpy as np
import os
import pandas as pd
import tensorflow as tf

def _collect_paths(root):
    """Recursively list every file path under *root*."""
    paths = []
    for dirname, _, filenames in os.walk(root):
        for filename in filenames:
            paths.append(os.path.join(dirname, filename))
    return paths

def _add_labels(frame, prefix):
    """Derive the class label from the first directory component after *prefix*."""
    frame['label'] = frame['filename']
    frame['label'] = frame['label'].str.replace(prefix, '')
    frame['label'] = frame['label'].str.split('/').str[0]
    return frame

def _predict_classes(paths):
    """Classify each image path with the loaded SavedModel; return class names."""
    preds = []
    for path in paths:
        img = Image.open(path).convert('RGB')
        # Resize to width 300 while keeping the aspect ratio.
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its long-standing name.
        img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
        inp = tf.constant(np.array(img)[None], dtype='float32')
        class_scores = model(inp)[0].numpy()
        preds.append(classes[class_scores.argmax()])
    return preds

# Labelled train/test frames derived from the directory layout.
Id = _collect_paths('/kaggle/input/cassava-disease-classification/train')
sol_train = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/train/')
Id = _collect_paths('/kaggle/input/cassava-disease-classification/test')
sol_test = _add_labels(pd.DataFrame().assign(filename=Id), '/kaggle/input/cassava-disease-classification/test/')

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
# NOTE(review): assumed to match the model's output-logit order — verify.
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']

# Predict the test split first (as the original cell did), then the train
# split; expand the short directory labels so they match the prediction
# strings, and score the train-split predictions.
result = _predict_classes(sol_test.filename)
result = _predict_classes(sol_train.filename)
sol_train = sol_train.assign(prediction=result)
sol_train['label'] = sol_train['label'].replace({'cbb': 'Cassava Bacterial Blight (CBB)', 'cbsd': 'Cassava Brown Streak Disease (CBSD)', 'cgm': 'Cassava Green Mottle (CGM)', 'cmd': 'Cassava Mosaic Disease (CMD)', 'healthy': 'Healthy'})
print(classification_report(sol_train['label'], sol_train['prediction']))
130013718/cell_3 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build a train filename frame; the label column here is only the path with
# the dataset prefix stripped (not yet reduced to the class directory name).
Id = []
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)
sol_train['label'] = sol_train['filename']
sol_train['label'] = sol_train['label'].str.replace('/kaggle/input/cassava-disease-classification/train/', '')
sol_train.head() | code
130013718/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
# Evaluate the saved EfficientNet cassava-disease classifier on the test
# split and attach human-readable predictions to `sol_test`.
from sklearn.metrics import classification_report
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image

# Index every training image path (kept for parity with the notebook flow;
# only the test split is actually scored below).
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)

# Index the test images; the ground-truth label is the directory component
# of each path once the fixed prefix is stripped ("<class>/<file>").
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test['label'] = sol_test['label'].str.split('/').str[0]

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']
result = []
for i in sol_test.filename:
    img = Image.open(i).convert('RGB')
    # Resize to width 300 while preserving aspect ratio.
    # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # long-standing name for the same resampling filter.
    img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
    inp_numpy = np.array(img)[None]  # add a leading batch dimension
    inp = tf.constant(inp_numpy, dtype='float32')
    class_scores = model(inp)[0].numpy()
    result.append(classes[class_scores.argmax()])
result[:5]

sol_test = sol_test.assign(prediction=result)
# Map the short directory names onto the model's class names so labels and
# predictions share one vocabulary before scoring.
sol_test['label'] = sol_test['label'].replace({'cbb': 'Cassava Bacterial Blight (CBB)', 'cbsd': 'Cassava Brown Streak Disease (CBSD)', 'cgm': 'Cassava Green Mottle (CGM)', 'cmd': 'Cassava Mosaic Disease (CMD)', 'healthy': 'Healthy'})
print(classification_report(sol_test['label'], sol_test['prediction']))
print(classification_report(sol_test['label'], sol_test['prediction'])) | code |
130013718/cell_14 | [
"text_html_output_1.png"
] | from PIL import Image
# Run the saved EfficientNet cassava-disease classifier over the test split
# and store the predicted class name for each image in `sol_test`.
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image

# Index every training image path (kept for parity with the notebook flow;
# only the test split is predicted below).
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)

# Index the test images; the ground-truth label is the directory component
# of each path once the fixed prefix is stripped ("<class>/<file>").
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test['label'] = sol_test['label'].str.split('/').str[0]

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']
result = []
for i in sol_test.filename:
    img = Image.open(i).convert('RGB')
    # Resize to width 300 while preserving aspect ratio.
    # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # long-standing name for the same resampling filter.
    img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
    inp_numpy = np.array(img)[None]  # add a leading batch dimension
    inp = tf.constant(inp_numpy, dtype='float32')
    class_scores = model(inp)[0].numpy()
    result.append(classes[class_scores.argmax()])
result[:5]

sol_test = sol_test.assign(prediction=result)
# Map the short directory names onto the model's class names so labels and
# predictions share one vocabulary.
sol_test['label'] = sol_test['label'].replace({'cbb': 'Cassava Bacterial Blight (CBB)', 'cbsd': 'Cassava Brown Streak Disease (CBSD)', 'cgm': 'Cassava Green Mottle (CGM)', 'cmd': 'Cassava Mosaic Disease (CMD)', 'healthy': 'Healthy'})
sol_test.head() | code |
130013718/cell_22 | [
"text_html_output_1.png"
] | import os
# Collect file listings for the train/test/soltest splits and load the
# sample submission CSV — same data products as before, phrased differently.
import os

import numpy as np
import pandas as pd


def _walk_paths(root):
    # Full path of every file found anywhere under `root`.
    return [os.path.join(dirpath, name)
            for dirpath, _, names in os.walk(root)
            for name in names]


Id = _walk_paths('/kaggle/input/cassava-disease-classification/train')
Id[:5]  # notebook-style peek (no effect as a script)
sol_train = pd.DataFrame({'filename': Id})

Id = _walk_paths('/kaggle/input/cassava-disease-classification/test')
Id[:5]
sol_test = pd.DataFrame({'filename': Id})

Id = _walk_paths('/kaggle/input/cassava-disease-classification/soltest')
Id[:5]
soltest = pd.DataFrame({'filename': Id})

# Sample submission shipped with the competition data.
sol = pd.read_csv('/kaggle/input/cassava-disease-classification/submission.csv')
sol.head() | code |
130013718/cell_10 | [
"text_html_output_1.png"
] | from PIL import Image
# Classify every test image with the saved EfficientNet cassava-disease
# model, accumulating the predicted class names in `result`.
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image

# Index every training image path (kept for parity with the notebook flow;
# only the test split is predicted below).
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/train'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_train = pd.DataFrame()
sol_train = sol_train.assign(filename=Id)

# Index the test images; the ground-truth label is the directory component
# of each path once the fixed prefix is stripped ("<class>/<file>").
Id = []
for dirname, _, filenames in os.walk('/kaggle/input/cassava-disease-classification/test'):
    for filename in filenames:
        Id.append(os.path.join(dirname, filename))
Id[:5]
sol_test = pd.DataFrame()
sol_test = sol_test.assign(filename=Id)
sol_test['label'] = sol_test['filename']
sol_test['label'] = sol_test['label'].str.replace('/kaggle/input/cassava-disease-classification/test/', '')
sol_test['label'] = sol_test['label'].str.split('/').str[0]

model = tf.saved_model.load('/kaggle/input/efficientnet-cassava-disease-classification/EfficientNet')
classes = ['Cassava Bacterial Blight (CBB)', 'Cassava Brown Streak Disease (CBSD)', 'Cassava Green Mottle (CGM)', 'Cassava Mosaic Disease (CMD)', 'Healthy']
result = []
for i in sol_test.filename:
    img = Image.open(i).convert('RGB')
    # Resize to width 300 while preserving aspect ratio.
    # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # long-standing name for the same resampling filter.
    img = img.resize((300, 300 * img.size[1] // img.size[0]), Image.LANCZOS)
    inp_numpy = np.array(img)[None]  # add a leading batch dimension
    inp = tf.constant(inp_numpy, dtype='float32')
    class_scores = model(inp)[0].numpy()
    result.append(classes[class_scores.argmax()])
result[:5] | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.