path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1) |
---|---|---|---|
73072460/cell_56 | [
"text_html_output_1.png"
] | import tensorflow as tf
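# NOTE (assumption): `train`, `val`, and `test` are DataFrames split from `dataframe`
# in an earlier cell that is not preserved here, e.g. something like:
#   from sklearn.model_selection import train_test_split
#   train, rest = train_test_split(dataframe, test_size=0.3, stratify=dataframe['labels'])
#   val, test = train_test_split(rest, test_size=0.5, stratify=rest['labels'])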
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model = tf.keras.models.Sequential()
cnn_model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(224, 224, 3)))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
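# After five 3x3 conv (valid padding) + 2x2 max-pool blocks, the 224x224 input is
# reduced to a 5x5x256 feature map (6,400 values once flattened).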
cnn_model.add(tf.keras.layers.Flatten())
cnn_model.add(tf.keras.layers.Dense(256, activation='relu'))
cnn_model.add(tf.keras.layers.Dropout(0.4))
cnn_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model.summary()
cnn_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
history = cnn_model.fit(training_generator, steps_per_epoch=99, validation_data=validation_generator, validation_steps=20, epochs=25) | code |
73072460/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
import pathlib
data_path = pathlib.Path('../input/a-large-scale-fish-dataset/Fish_Dataset/Fish_Dataset')
all_images = list(data_path.glob('*/*/*.jpg')) + list(data_path.glob('*/*/*.png'))
images = []
labels = []
for item in all_images:
path = os.path.normpath(item)
splits = path.split(os.sep)
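    # Each class folder also has a 'GT' subfolder of ground-truth segmentation masks;
    # keep only the raw fish photos.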
if 'GT' not in splits[-2]:
images.append(item)
label = splits[-2]
labels.append(label)
image_paths = pd.Series(images).astype(str)
labels = pd.Series(labels)
dataframe = pd.concat([image_paths, labels], axis=1)
dataframe.columns = ['images', 'labels']
dataframe.head() | code |
73072460/cell_40 | [
"text_html_output_1.png"
] | from IPython.display import Image
Image(url='https://miro.medium.com/max/658/0*jLoqqFsO-52KHTn9.gif', width=750, height=500) | code |
73072460/cell_11 | [
"text_html_output_1.png"
] | import tensorflow as tf
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
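# Augmentation (rotation, shifts, shear, zoom, flips) is applied to the training stream
# only; the validation and test generators below just rescale pixels to [0, 1].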
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64) | code |
73072460/cell_60 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import tensorflow as tf
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model = tf.keras.models.Sequential()
cnn_model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(224, 224, 3)))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Flatten())
cnn_model.add(tf.keras.layers.Dense(256, activation='relu'))
cnn_model.add(tf.keras.layers.Dropout(0.4))
cnn_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model.summary()
cnn_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
history = cnn_model.fit(training_generator, steps_per_epoch=99, validation_data=validation_generator, validation_steps=20, epochs=25)
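# Final held-out evaluation; returns [loss, accuracy] for the test generator.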
cnn_model.evaluate(test_generator) | code |
73072460/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import pathlib
data_path = pathlib.Path('../input/a-large-scale-fish-dataset/Fish_Dataset/Fish_Dataset')
all_images = list(data_path.glob('*/*/*.jpg')) + list(data_path.glob('*/*/*.png'))
images = []
labels = []
for item in all_images:
path = os.path.normpath(item)
splits = path.split(os.sep)
if 'GT' not in splits[-2]:
images.append(item)
label = splits[-2]
labels.append(label)
image_paths = pd.Series(images).astype(str)
labels = pd.Series(labels)
dataframe = pd.concat([image_paths, labels], axis=1)
dataframe.columns = ['images', 'labels']
fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(15, 10), subplot_kw={'xticks': [], 'yticks': []})
for i, ax in enumerate(axes.flat):
ax.imshow(plt.imread(dataframe.images[i]))
ax.set_title(dataframe.labels[i])
plt.show() | code |
73072460/cell_59 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import pathlib
import tensorflow as tf
data_path = pathlib.Path('../input/a-large-scale-fish-dataset/Fish_Dataset/Fish_Dataset')
all_images = list(data_path.glob('*/*/*.jpg')) + list(data_path.glob('*/*/*.png'))
images = []
labels = []
for item in all_images:
path = os.path.normpath(item)
splits = path.split(os.sep)
if 'GT' not in splits[-2]:
images.append(item)
label = splits[-2]
labels.append(label)
image_paths = pd.Series(images).astype(str)
labels = pd.Series(labels)
dataframe = pd.concat([image_paths, labels], axis=1)
dataframe.columns = ['images', 'labels']
fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(15, 10), subplot_kw={'xticks': [], 'yticks': []})
for i, ax in enumerate(axes.flat):
ax.imshow(plt.imread(dataframe.images[i]))
ax.set_title(dataframe.labels[i])
plt.show()
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model = tf.keras.models.Sequential()
cnn_model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(224, 224, 3)))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Flatten())
cnn_model.add(tf.keras.layers.Dense(256, activation='relu'))
cnn_model.add(tf.keras.layers.Dropout(0.4))
cnn_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model.summary()
cnn_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
history = cnn_model.fit(training_generator, steps_per_epoch=99, validation_data=validation_generator, validation_steps=20, epochs=25)
cnn_train_loss = history.history['loss']
cnn_val_loss = history.history['val_loss']
train_acc = history.history['acc']
val_acc = history.history['val_acc']
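# Accuracy curves per epoch; a persistently widening train/validation gap would
# indicate overfitting.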
plt.plot(history.epoch, train_acc, label='Training Accuracy')
plt.plot(history.epoch, val_acc, label='Validation Accuracy')
plt.grid(True)
plt.legend() | code |
73072460/cell_58 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import pathlib
import tensorflow as tf
data_path = pathlib.Path('../input/a-large-scale-fish-dataset/Fish_Dataset/Fish_Dataset')
all_images = list(data_path.glob('*/*/*.jpg')) + list(data_path.glob('*/*/*.png'))
images = []
labels = []
for item in all_images:
path = os.path.normpath(item)
splits = path.split(os.sep)
if 'GT' not in splits[-2]:
images.append(item)
label = splits[-2]
labels.append(label)
image_paths = pd.Series(images).astype(str)
labels = pd.Series(labels)
dataframe = pd.concat([image_paths, labels], axis=1)
dataframe.columns = ['images', 'labels']
fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(15, 10), subplot_kw={'xticks': [], 'yticks': []})
for i, ax in enumerate(axes.flat):
ax.imshow(plt.imread(dataframe.images[i]))
ax.set_title(dataframe.labels[i])
plt.show()
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model = tf.keras.models.Sequential()
cnn_model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(224, 224, 3)))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Flatten())
cnn_model.add(tf.keras.layers.Dense(256, activation='relu'))
cnn_model.add(tf.keras.layers.Dropout(0.4))
cnn_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model.summary()
cnn_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
history = cnn_model.fit(training_generator, steps_per_epoch=99, validation_data=validation_generator, validation_steps=20, epochs=25)
cnn_train_loss = history.history['loss']
cnn_val_loss = history.history['val_loss']
plt.plot(history.epoch, cnn_train_loss, label='Training Loss')
plt.plot(history.epoch, cnn_val_loss, label='Validation Loss')
plt.grid(True)
plt.legend() | code |
73072460/cell_16 | [
"image_output_1.png"
] | import tensorflow as tf
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
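# plot_model renders the architecture diagram; it requires pydot and graphviz to be installed.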
tf.keras.utils.plot_model(mlp_model, show_shapes=True, show_dtype=True, show_layer_names=True) | code |
73072460/cell_47 | [
"text_html_output_1.png"
] | from IPython.display import Image
Image(url='https://nico-curti.github.io/NumPyNet/NumPyNet/images/maxpool.gif', width=750, height=500) | code |
73072460/cell_17 | [
"text_plain_output_1.png"
] | import tensorflow as tf
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
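# Flattening 224x224x3 yields 150,528 inputs, so the first Dense(256) layer alone holds
# roughly 38.5M parameters; summary() below makes that cost explicit.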
mlp_model.summary() | code |
73072460/cell_31 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from IPython.display import Image
Image(url='https://www.researchgate.net/profile/Lavender-Jiang-2/publication/343441194/figure/fig2/AS:921001202311168@1596595206463/Basic-CNN-architecture-and-kernel-A-typical-CNN-consists-of-several-component-types.ppm', width=750, height=500) | code |
73072460/cell_53 | [
"text_html_output_1.png"
] | import tensorflow as tf
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model = tf.keras.models.Sequential()
cnn_model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(224, 224, 3)))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1, activation='relu'))
cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
cnn_model.add(tf.keras.layers.Flatten())
cnn_model.add(tf.keras.layers.Dense(256, activation='relu'))
cnn_model.add(tf.keras.layers.Dropout(0.4))
cnn_model.add(tf.keras.layers.Dense(9, activation='softmax'))
cnn_model.summary() | code |
73072460/cell_27 | [
"text_plain_output_1.png"
from IPython.display import Image
Image(url='https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRwed5zvnSDt0zrFd_gf-kUIMoF7Nm6FXIwDw&usqp=CAU', width=750, height=500) | code |
73072460/cell_37 | [
"text_html_output_1.png"
] | from IPython.display import Image
Image(url='https://i.stack.imgur.com/CQtHP.gif', width=750, height=500) | code |
74042725/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
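# _get_numeric_data() is a private pandas API; the public equivalent is
# sales.select_dtypes(include='number').columns.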
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols | code |
74042725/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns | code |
74042725/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns | code |
74042725/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sales.Ship_Mode.value_counts()
sales.Region.value_counts()
plt.figure(figsize=(5, 5))
corp = sales.loc[sales['Customer_Segment'] == 'Corporate'].count()[0]
cons = sales.loc[sales['Customer_Segment'] == 'Consumer'].count()[0]
hoff = sales.loc[sales['Customer_Segment'] == 'Home Office'].count()[0]
sbiz = sales.loc[sales['Customer_Segment'] == 'Small Business'].count()[0]
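# .count()[0] takes the count of the first column, i.e. the number of rows in each
# segment (assuming no NaNs there); len(...) would express the intent more directly.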
explode = (0.1, 0.1, 0.1, 0.1)
labels = ['Corporate', 'Consumer', 'Home Office', 'Small Business']
plt.pie([corp, cons, hoff, sbiz], labels=labels, autopct='%.2f %%', explode=explode)
plt.title('Customer Segment')
plt.show()
sales.Customer_Segment.value_counts() | code |
74042725/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sales.Ship_Mode.value_counts()
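# Note: newer seaborn releases prefer the keyword form, e.g. sns.countplot(x=sales.Region).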
sns.countplot(sales.Region)
sales.Region.value_counts() | code |
74042725/cell_6 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
len(sales['Order_ID'].unique()) | code |
74042725/cell_29 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sales.Ship_Mode.value_counts()
sales.Region.value_counts()
corp = sales.loc[sales['Customer_Segment'] == 'Corporate'].count()[0]
cons = sales.loc[sales['Customer_Segment'] == 'Consumer'].count()[0]
hoff = sales.loc[sales['Customer_Segment'] == 'Home Office'].count()[0]
sbiz = sales.loc[sales['Customer_Segment'] == 'Small Business'].count()[0]
explode = (0.1, 0.1, 0.1, 0.1)
labels = ['Corporate', 'Consumer', 'Home Office', 'Small Business']
sales.Customer_Segment.value_counts()
sales.Product_Category.value_counts()
sns.countplot(sales.Product_Container)
sales.Product_Container.value_counts() | code |
74042725/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sales.Ship_Mode.value_counts()
sales.Region.value_counts()
corp = sales.loc[sales['Customer_Segment'] == 'Corporate'].count()[0]
cons = sales.loc[sales['Customer_Segment'] == 'Consumer'].count()[0]
hoff = sales.loc[sales['Customer_Segment'] == 'Home Office'].count()[0]
sbiz = sales.loc[sales['Customer_Segment'] == 'Small Business'].count()[0]
explode = (0.1, 0.1, 0.1, 0.1)
labels = ['Corporate', 'Consumer', 'Home Office', 'Small Business']
sales.Customer_Segment.value_counts()
sns.countplot(sales.Product_Category)
sales.Product_Category.value_counts() | code |
74042725/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
print(cont_cols) | code |
74042725/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
len(sales['Product_Name'].unique()) | code |
74042725/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.head() | code |
74042725/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sns.countplot(sales.Ship_Mode)
sales.Ship_Mode.value_counts() | code |
74042725/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape
sales_id = sales['Order_ID']
sales_pname = sales['Product_Name']
sales = sales.drop(columns='Order_ID')
sales = sales.drop(columns='Product_Name')
sales.columns
num_cols = sales._get_numeric_data().columns
cont_cols = []
for i in num_cols:
if len(sales[i].unique()) > int(sales.shape[0] / 25):
cont_cols.append(i)
cat_cols = list(set(sales.columns) - set(cont_cols))
cat_cols
sales.describe() | code |
74042725/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
sales = pd.read_csv('../input/sales-store-product-details/Salesstore.csv')
sales.columns
sales.shape | code |
104129189/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020") | code |
104129189/cell_39 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
sns.set_style('whitegrid')
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
_ = sns.heatmap(df.corr(), annot=True, ax=ax)
serieengineSize = df.engineSize.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines below
serieengineSize = serieengineSize[serieengineSize < 10]
df.engineSize = df['engineSize'].apply(lambda x: 'Other' if x in serieengineSize else x)
serie_model = df.model.value_counts()
serie_model = serie_model[serie_model < 10]
df.model = df['model'].apply(lambda x: 'Other' if x in serie_model else x)
seriefuelType = df.fuelType.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines above
seriefuelType = seriefuelType[seriefuelType < 10]
df.fuelType = df['fuelType'].apply(lambda x: 'Other' if x in seriefuelType else x)
for column in ['fuelType', 'engineSize', 'model']:
df = df.query("{} != 'Other'".format(column))
def Histogram(x):
    fig, ax = plt.subplots(1, 1, figsize=(20, 8))
    return sns.histplot(data=df, x=x, color='c', ax=ax)
_ = Histogram(df.year)
df.query('year == 2006') | code |
104129189/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104129189/cell_45 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
sns.set_style('whitegrid')
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
_ = sns.heatmap(df.corr(), annot=True, ax=ax)
serieengineSize = df.engineSize.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines below
serieengineSize = serieengineSize[serieengineSize < 10]
df.engineSize = df['engineSize'].apply(lambda x: 'Other' if x in serieengineSize else x)
serie_model = df.model.value_counts()
serie_model = serie_model[serie_model < 10]
df.model = df['model'].apply(lambda x: 'Other' if x in serie_model else x)
seriefuelType = df.fuelType.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines above
seriefuelType = seriefuelType[seriefuelType < 10]
df.fuelType = df['fuelType'].apply(lambda x: 'Other' if x in seriefuelType else x)
for column in ['fuelType', 'engineSize', 'model']:
df = df.query("{} != 'Other'".format(column))
def Histogram(x):
    fig, ax = plt.subplots(1, 1, figsize=(20, 8))
    return sns.histplot(data=df, x=x, color='c', ax=ax)
_ = Histogram(df.year)
df.query('year == 2006')
df = df.query('year >= 2006')
df = df.query('mileage > 1000')
df.price.max()
_ = Histogram(df.price) | code |
104129189/cell_18 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
def serie(data):
return df[data].value_counts().sort_values(ascending=True)
def Barplot(serie, title):
colors = ['#77dd77', '#fdfd96', '#84b6f4', '#fdcae1', '#b2e2f2', '#ffda9e']
    # (assumption) the plotting body is not preserved here; a minimal sketch consistent
    # with the call sites: a horizontal bar chart of the series, one color per bar.
    return serie.plot.barh(color=colors, title=title, figsize=(20, 8))
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
_ = Barplot(serie=serie_mean_model, title='Mean By Model') | code |
104129189/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
df.info() | code |
104129189/cell_15 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
def serie(data):
return df[data].value_counts().sort_values(ascending=True)
def Barplot(serie, title):
colors = ['#77dd77', '#fdfd96', '#84b6f4', '#fdcae1', '#b2e2f2', '#ffda9e']
    # (assumption) plotting body not preserved; same minimal sketch as above.
    return serie.plot.barh(color=colors, title=title, figsize=(20, 8))
serieTransmission = serie('transmission')  # (assumption) reconstructed from the naming pattern
_ = Barplot(serie=serieTransmission, title='Transmission') | code |
104129189/cell_38 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
sns.set_style('whitegrid')
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
_ = sns.heatmap(df.corr(), annot=True, ax=ax)
serieengineSize = df.engineSize.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines below
serieengineSize = serieengineSize[serieengineSize < 10]
df.engineSize = df['engineSize'].apply(lambda x: 'Other' if x in serieengineSize else x)
serie_model = df.model.value_counts()
serie_model = serie_model[serie_model < 10]
df.model = df['model'].apply(lambda x: 'Other' if x in serie_model else x)
seriefuelType = df.fuelType.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines above
seriefuelType = seriefuelType[seriefuelType < 10]
df.fuelType = df['fuelType'].apply(lambda x: 'Other' if x in seriefuelType else x)
for column in ['fuelType', 'engineSize', 'model']:
df = df.query("{} != 'Other'".format(column))
def Histogram(x):
    fig, ax = plt.subplots(1, 1, figsize=(20, 8))
    return sns.histplot(data=df, x=x, color='c', ax=ax)
_ = Histogram(df.year) | code |
104129189/cell_43 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
sns.set_style('whitegrid')
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
_ = sns.heatmap(df.corr(), annot=True, ax=ax)
serieengineSize = df.engineSize.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines below
serieengineSize = serieengineSize[serieengineSize < 10]
df.engineSize = df['engineSize'].apply(lambda x: 'Other' if x in serieengineSize else x)
serie_model = df.model.value_counts()
serie_model = serie_model[serie_model < 10]
df.model = df['model'].apply(lambda x: 'Other' if x in serie_model else x)
seriefuelType = df.fuelType.value_counts()  # (assumption) definition reconstructed; mirrors the serie_model lines above
seriefuelType = seriefuelType[seriefuelType < 10]
df.fuelType = df['fuelType'].apply(lambda x: 'Other' if x in seriefuelType else x)
for column in ['fuelType', 'engineSize', 'model']:
df = df.query("{} != 'Other'".format(column))
def Histogram(x):
    fig, ax = plt.subplots(1, 1, figsize=(20, 8))
    return sns.histplot(data=df, x=x, color='c', ax=ax)
_ = Histogram(df.year)
df.query('year == 2006')
df = df.query('year >= 2006')
df = df.query('mileage > 1000')
df.price.max() | code |
104129189/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
sns.set_style('whitegrid')
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
_ = sns.heatmap(df.corr(), annot=True, ax=ax) | code |
104129189/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
def serie(data):
return df[data].value_counts().sort_values(ascending=True)
def Barplot(serie, title):
colors = ['#77dd77', '#fdfd96', '#84b6f4', '#fdcae1', '#b2e2f2', '#ffda9e']
    # (assumption) plotting body not preserved; same minimal sketch as above.
    return serie.plot.barh(color=colors, title=title, figsize=(20, 8))
serieengineSize = serie('engineSize')  # (assumption) reconstructed from the naming pattern
_ = Barplot(serie=serieengineSize, title='Engine Size Counts') | code |
104129189/cell_22 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
def serie(data):
return df[data].value_counts().sort_values(ascending=True)
def Barplot(serie, title):
colors = ['#77dd77', '#fdfd96', '#84b6f4', '#fdcae1', '#b2e2f2', '#ffda9e']
    # (assumption) plotting body not preserved; same minimal sketch as above.
    return serie.plot.barh(color=colors, title=title, figsize=(20, 8))
serie_mean_model = df.groupby('model')['price'].mean().sort_values(ascending=True)
mean_mpg = df.groupby('fuelType')['mpg'].mean().sort_values(ascending=True)
_ = Barplot(serie=mean_mpg, title='Mean MPG') | code |
104129189/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ford-car-price-prediction/ford.csv')
df.query("model == ' Mustang' and year == 2020")
def serie(data):
return df[data].value_counts().sort_values(ascending=True)
def Barplot(serie, title):
colors = ['#77dd77', '#fdfd96', '#84b6f4', '#fdcae1', '#b2e2f2', '#ffda9e']
    # (assumption) plotting body not preserved; same minimal sketch as above.
    return serie.plot.barh(color=colors, title=title, figsize=(20, 8))
seriefuelType = serie('fuelType')  # (assumption) reconstructed from the naming pattern
_ = Barplot(serie=seriefuelType, title='FuelType Counts') | code |
106209352/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
categorical_features = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O']
categorical_features | code |
106209352/cell_9 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature | code |
106209352/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na | code |
106209352/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature | code |
106209352/cell_2 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
dataset.head() | code |
106209352/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
data[feature].hist(bins=20)
plt.xlabel(feature)
plt.ylabel('counts')
plt.show() | code |
106209352/cell_19 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
categorical_features = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O']
categorical_features
dataset.corr()
def correlation(dataset, threshold):
col_corr = set()
corr_matrix = dataset.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i]
col_corr.add(colname)
return col_corr
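# Note: the threshold test keeps only strongly positive correlations; using
# abs(corr_matrix.iloc[i, j]) would also catch strong negative ones.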
correlation_features = correlation(dataset, 0.7)
correlation_features | code |
106209352/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature | code |
106209352/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
data.groupby(feature)['target'].median().plot.bar()
plt.xlabel(feature)
plt.ylabel('target')
plt.show() | code |
106209352/cell_16 | [
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
categorical_features = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O']
categorical_features
dataset.corr() | code |
106209352/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
categorical_features = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O']
categorical_features
dataset.corr()
sns.heatmap(dataset.corr()) | code |
106209352/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
categorical_features = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O']
categorical_features
for feature in categorical_features:
print('The Feature is {} and the no of categories are: {}'.format(feature, len(dataset[feature].unique()))) | code |
106209352/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
data[feature].hist(bins=20)
plt.xlabel(feature)
plt.ylabel('count')
plt.show() | code |
106209352/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum()
numerical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
numerical_feature
discrete_feature = [feature for feature in numerical_feature if len(dataset[feature].unique()) < 15]
discrete_feature
for feature in discrete_feature:
data = dataset.copy()
continuous_feature = [feature for feature in numerical_feature if feature not in discrete_feature]
continuous_feature
for feature in continuous_feature:
data = dataset.copy()
for feature in continuous_feature:
data[feature] = np.log(data[feature])
for feature in continuous_feature:
data = dataset.copy()
data[feature] = np.log(data[feature])
data.boxplot(column=feature)
plt.ylabel(feature)
plt.title(feature)
plt.show() | code |
106209352/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('../input/the-spotify-hit-predictor-dataset/dataset-of-90s.csv')
features_with_na = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1]
features_with_na
dataset.isnull().sum() | code |
122251856/cell_5 | [
"image_output_11.png",
"image_output_17.png",
"image_output_14.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_7.png",
"image_output_20.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png"
] | from cv2 import resize
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL.Image  # needed for the PIL.Image.open calls below
import tqdm.auto as tqdm
olci_root = '/kaggle/input/medisar-olci'
available_olci = os.listdir(olci_root)
def plot_olci(key):
with open(f'{olci_root}/{key}/metadata.txt', 'r') as file:
lines = [line.replace('\n', '') for line in file.readlines()]
min_value = eval(lines[1][4:])
max_value = eval(lines[2][4:])
delta = eval(lines[3][6:])
im = np.array(PIL.Image.open(f'{olci_root}/{key}/{key}_S3.png')).astype(float)
im[im == 0] = np.nan
im = min_value + delta * (im - 1)
title = ' '.join(lines[0].split(' ')[:3])
    # (assumption) the display call is not preserved; `im` and `title` are otherwise
    # unused and plt.colorbar needs a current image, so presumably:
    plt.imshow(im)
    plt.title(title)
    plt.colorbar(orientation='horizontal', fraction=0.046)
plt.axis(False)
i = 0
max_slicks = 0
folders = glob.glob('/kaggle/input/medisar20??/*/*/*')
pbar = tqdm.tqdm(folders, smoothing=0.001)
for folder in pbar:
content = os.listdir(folder)
if 'era5_wind_speed_256.png' not in content:
continue
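    # Keep only low-wind scenes (mean wind <= 5, units assumed m/s given the /10
    # decoding), mostly-ocean scenes (land mask < 50%), and scenes with substantial
    # slick coverage (>= 5% of pixels after thresholding).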
wind = np.array(PIL.Image.open(folder + '/era5_wind_speed_256.png')) / 10
if wind[wind > 0].mean() > 5:
continue
mask = np.array(PIL.Image.open(folder + '/mask.png')).astype('float')
if mask.mean() > 0.5:
continue
slicks = np.array(PIL.Image.open(folder + '/biological_slicks.png')) / 255
slicks[slicks < 0.75] = 0
max_slicks = max(max_slicks, slicks.mean())
pbar.set_description(f'{max_slicks}')
if slicks.mean() < 0.05:
continue
key = os.path.split(folder)[1].lower()
    if key not in available_olci:
continue
sar = np.array(PIL.Image.open(glob.glob(folder + '/*vv*')[0]))
slicks[resize(mask, slicks.shape[::-1]) > 0.5] = np.nan
plt.figure(figsize=(20, 6))
plt.suptitle(os.path.split(folder)[1], fontweight='bold')
plt.subplot(131)
plt.imshow(sar, cmap='gray', vmin=0, vmax=2 ** 12)
plt.colorbar(orientation='horizontal', fraction=0.046)
plt.axis(False)
plt.subplot(132)
plt.imshow(slicks, cmap='gray', vmin=0, vmax=1)
plt.colorbar(orientation='horizontal', fraction=0.046)
plt.axis(False)
plt.subplot(133)
plot_olci(key)
plt.tight_layout()
plt.show()
plt.close()
i += 1
if i == 20:
break | code |
89123667/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
def train_eval_kfolds(X, y, n_splits=5, random_state=None):
assert hasattr(X, 'iloc')
assert hasattr(y, 'iloc')
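    # Despite the "kfolds" name, this yields ordered, non-shuffled TimeSeriesSplit
    # folds; the random_state argument is accepted but unused.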
skf = TimeSeriesSplit(n_splits=n_splits)
for train_index, eval_index in skf.split(X, y):
X_train, X_eval = (X.iloc[train_index], X.iloc[eval_index])
y_train, y_eval = (y.iloc[train_index], y.iloc[eval_index])
yield (X_train, X_eval, y_train, y_eval)
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
features = [c for c in train_df.columns if c not in ['congestion']]
X = train_df[features]
y = train_df['congestion']
features
class Pipeline:
object_cols = None
def transform(self, X):
X2 = X.copy()
date_series = pd.to_datetime(X['time'])
date_accessor = date_series.dt
for attr in ['date', 'day', 'day_of_week', 'day_of_year', 'days_in_month', 'freq', 'hour', 'is_leap_year', 'is_month_end', 'is_month_start', 'is_quarter_end', 'is_quarter_start', 'is_year_end', 'is_year_start', 'microsecond', 'minute', 'month', 'nanosecond', 'quarter', 'second', 'time', 'weekday', 'year']:
X2[attr] = getattr(date_accessor, attr)
X2['week'] = date_accessor.isocalendar().week.astype(int)
X2['original_time'] = X['time']
X2['time'] = X2['time'].astype(str)
self.object_cols = list(X2.dtypes[X2.dtypes == object].index)
X2[self.object_cols] = X2[self.object_cols].astype(str)
return X2
test_df = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')
submission_df = test_df[[]].copy()
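# test_df[[]] keeps just the row_id index as an empty skeleton to attach predictions to.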
submission_df | code |
89123667/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
target_mean = train_df['congestion'].mean()
target_mean
(train_df['congestion'] - target_mean).abs().mean() | code |
89123667/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
train_df['congestion'] | code |
89123667/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('../input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89123667/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
train_df['congestion'].hist(bins=100) | code |
89123667/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
target_mean = train_df['congestion'].mean()
target_mean | code |
89123667/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
def train_eval_kfolds(X, y, n_splits=5, random_state=None):
assert hasattr(X, 'iloc')
assert hasattr(y, 'iloc')
skf = TimeSeriesSplit(n_splits=n_splits)
for train_index, eval_index in skf.split(X, y):
X_train, X_eval = (X.iloc[train_index], X.iloc[eval_index])
y_train, y_eval = (y.iloc[train_index], y.iloc[eval_index])
yield (X_train, X_eval, y_train, y_eval)
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
features = [c for c in train_df.columns if c not in ['congestion']]
X = train_df[features]
y = train_df['congestion']
features
class Pipeline:
object_cols = None
def transform(self, X):
X2 = X.copy()
date_series = pd.to_datetime(X['time'])
date_accessor = date_series.dt
for attr in ['date', 'day', 'day_of_week', 'day_of_year', 'days_in_month', 'freq', 'hour', 'is_leap_year', 'is_month_end', 'is_month_start', 'is_quarter_end', 'is_quarter_start', 'is_year_end', 'is_year_start', 'microsecond', 'minute', 'month', 'nanosecond', 'quarter', 'second', 'time', 'weekday', 'year']:
X2[attr] = getattr(date_accessor, attr)
X2['week'] = date_accessor.isocalendar().week.astype(int)
X2['original_time'] = X['time']
X2['time'] = X2['time'].astype(str)
self.object_cols = list(X2.dtypes[X2.dtypes == object].index)
X2[self.object_cols] = X2[self.object_cols].astype(str)
return X2
test_df = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')
test_df.head() | code |
89123667/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
def train_eval_kfolds(X, y, n_splits=5, random_state=None):
assert hasattr(X, 'iloc')
assert hasattr(y, 'iloc')
skf = TimeSeriesSplit(n_splits=n_splits)
for train_index, eval_index in skf.split(X, y):
X_train, X_eval = (X.iloc[train_index], X.iloc[eval_index])
y_train, y_eval = (y.iloc[train_index], y.iloc[eval_index])
yield (X_train, X_eval, y_train, y_eval)
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
features = [c for c in train_df.columns if c not in ['congestion']]
X = train_df[features]
y = train_df['congestion']
features
class Pipeline:
object_cols = None
def transform(self, X):
X2 = X.copy()
date_series = pd.to_datetime(X['time'])
date_accessor = date_series.dt
for attr in ['date', 'day', 'day_of_week', 'day_of_year', 'days_in_month', 'freq', 'hour', 'is_leap_year', 'is_month_end', 'is_month_start', 'is_quarter_end', 'is_quarter_start', 'is_year_end', 'is_year_start', 'microsecond', 'minute', 'month', 'nanosecond', 'quarter', 'second', 'time', 'weekday', 'year']:
X2[attr] = getattr(date_accessor, attr)
X2['week'] = date_accessor.isocalendar().week.astype(int)
X2['original_time'] = X['time']
X2['time'] = X2['time'].astype(str)
self.object_cols = list(X2.dtypes[X2.dtypes == object].index)
X2[self.object_cols] = X2[self.object_cols].astype(str)
return X2
myPipeline = Pipeline()
X_transformed = myPipeline.transform(X)
test_df = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id')
test_df_transformed = myPipeline.transform(test_df)
test_df_transformed | code |
89123667/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
from sklearn.metrics import accuracy_score
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split as train_eval_split
from lightgbm import LGBMClassifier
import pdb
from sklearn.preprocessing import StandardScaler
from math import factorial
import re
from catboost import CatBoostRegressor | code |
89123667/cell_14 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
print('overall scores', np.mean(scores)) | code |
89123667/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
features = [c for c in train_df.columns if c not in ['congestion']]
X = train_df[features]
y = train_df['congestion']
features | code |
89123667/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nrows = None
train_df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', nrows=nrows)
train_df.head() | code |
33102709/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
# reshape dataframe into pivot table
d = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'avg_peak_bed_usage')
# set visualization features like title, axis names, and legend name
d.columns.name = 'Initial Infections'
plot1 = d.plot(title = 'Policy Strictness v. Hospital Bed Demand')
plot1.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Hospital Bed Demand')
# reshape dataframe into pivot table
f = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'success_rate')
# set visualization features like title, axis names, and legend name
f.columns.name = 'Initial Infections'
plot2 = f.plot(title = 'Policy Strictness v. Simulation Success Rate')
plot2.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Success Rate')
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 2)]
s.tail(1) | code |
33102709/cell_23 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
# reshape dataframe into pivot table
d = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'avg_peak_bed_usage')
# set visualization features like title, axis names, and legend name
d.columns.name = 'Initial Infections'
plot1 = d.plot(title = 'Policy Strictness v. Hospital Bed Demand')
plot1.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Hospital Bed Demand')
# reshape dataframe into pivot table
f = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'success_rate')
# set visualization features like title, axis names, and legend name
f.columns.name = 'Initial Infections'
plot2 = f.plot(title = 'Policy Strictness v. Simulation Success Rate')
plot2.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Success Rate')
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 2)]
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 20)]
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 40)]
s.tail(1) | code |
33102709/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
# reshape dataframe into pivot table
d = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'avg_peak_bed_usage')
# set visualization features like title, axis names, and legend name
d.columns.name = 'Initial Infections'
plot1 = d.plot(title = 'Policy Strictness v. Hospital Bed Demand')
plot1.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Hospital Bed Demand')
f = case_c.pivot(index='x_value', columns='n_value', values='success_rate')
f.columns.name = 'Initial Infections'
plot2 = f.plot(title='Policy Strictness v. Simulation Success Rate')
plot2.set(xlabel='Maximum Allowed Group Size', ylabel='Success Rate') | code |
33102709/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
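# reshape dataframe into pivot table (one column per initial-infection count)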
d = case_c.pivot(index='x_value', columns='n_value', values='avg_peak_bed_usage')
d.columns.name = 'Initial Infections'
plot1 = d.plot(title='Policy Strictness v. Hospital Bed Demand')
plot1.set(xlabel='Maximum Allowed Group Size', ylabel='Hospital Bed Demand') | code |
33102709/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
# reshape dataframe into pivot table
d = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'avg_peak_bed_usage')
# set visualization features like title, axis names, and legend name
d.columns.name = 'Initial Infections'
plot1 = d.plot(title = 'Policy Strictness v. Hospital Bed Demand')
plot1.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Hospital Bed Demand')
# reshape dataframe into pivot table
f = case_c.pivot(index = 'x_value', columns = 'n_value', values = 'success_rate')
# set visualization features like title, axis names, and legend name
f.columns.name = 'Initial Infections'
plot2 = f.plot(title = 'Policy Strictness v. Simulation Success Rate')
plot2.set(xlabel = 'Maximum Allowed Group Size', ylabel = 'Success Rate')
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 2)]
s = case_c[(case_c.success_rate >= 0.9) & (case_c.n_value == 20)]
s.tail(1) | code |
33102709/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
case_c = pd.read_csv('/kaggle/input/group-c-data/analysis_data_test.csv')
case_c.head(10) | code |
2002244/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
workdir = '../input/'
basins_data = pd.read_csv(workdir + 'BasinCharacteristics.csv')
features_cols = basins_data.columns[1:4]
target_col = basins_data.columns[0]
print('Feature column(s):\n{}\n'.format(features_cols))
print('Target column:\n{}'.format(target_col))
print(basins_data.head())
print(basins_data.describe())
X = basins_data[features_cols]
y = basins_data[target_col] | code |
2002244/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
workdir = '../input/'
basins_data = pd.read_csv(workdir + 'BasinCharacteristics.csv')
features_cols = basins_data.columns[1:4]
target_col = basins_data.columns[0]
X = basins_data[features_cols]
y = basins_data[target_col]
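# n_clusters=4 is an ad-hoc choice here; the elbow plot in cell_8 below explores a data-driven k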
y_pred = KMeans(n_clusters=4).fit_predict(X)
y_pred | code |
2002244/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import plotly
import pandas as pd
import pylab
import matplotlib.pyplot as plt
import calendar
import seaborn
import math
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn import preprocessing
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from scipy import stats
from sklearn.model_selection import cross_val_score
from math import sqrt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets | code |
2002244/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2002244/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
workdir = '../input/'
basins_data = pd.read_csv(workdir + 'BasinCharacteristics.csv')
features_cols = basins_data.columns[1:4]
target_col = basins_data.columns[0]
X = basins_data[features_cols]
y = basins_data[target_col]
def elbow_plot(data, maxK=10, seed_centroids=None):
"""
parameters:
- data: pandas DataFrame (data to be fitted)
- maxK (default = 10): integer (maximum number of clusters with which to run k-means)
- seed_centroids (default = None ): float (initial value of centroids for k-means)
"""
sse = {}
for k in range(1, maxK):
print('k: ', k)
        if seed_centroids is not None:
            seeds = seed_centroids.head(k)
            # sklearn requires n_init=1 when init is an explicit array of centroids
            kmeans = KMeans(n_clusters=k, max_iter=500, n_init=1, random_state=0, init=np.reshape(seeds, (k, 1))).fit(data)
        else:
            kmeans = KMeans(n_clusters=k, max_iter=300, n_init=100, random_state=0).fit(data)
        # writing labels back into `data` here would leak the cluster column into later fits, so it is omitted
        sse[k] = kmeans.inertia_
plt.figure()
plt.plot(list(sse.keys()), list(sse.values()))
plt.show()
return
elbow_plot(X, maxK=10) | code |
2002244/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
workdir = '../input/'
basins_data = pd.read_csv(workdir + 'BasinCharacteristics.csv')
basins_data.head() | code |
2002244/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
workdir = '../input/'
basins_data = pd.read_csv(workdir + 'BasinCharacteristics.csv')
features_cols = basins_data.columns[1:4]
target_col = basins_data.columns[0]
X = basins_data[features_cols]
y = basins_data[target_col]
X | code |
129015254/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set() | code |
129015254/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
college_categories = ['IIM A', 'IIM B', 'IIM C', 'IIM L', 'IIM I', 'IIM K']
time_periods = [2019, 2020, 2021, 2022]
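# rows follow college_categories, columns follow time_periods (average salary per year)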
avg_sal_data = np.array([[12, 10, 12, 13], [11, 9, 10, 12], [11, 8, 9, 10], [10, 7, 8, 9], [9, 8, 7, 8], [8, 7, 8, 9]])
# argsort of the negated salaries gives college indices from highest to lowest;
# inverting that permutation yields each college's rank (1 = highest salary)
order = np.argsort(-avg_sal_data, axis=0)
rank_data = np.zeros_like(avg_sal_data)
for i in range(len(time_periods)):
    rank_data[order[:, i], i] = np.arange(1, len(college_categories) + 1)
rank_data | code |
129015254/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
college_categories = ['IIM A', 'IIM B', 'IIM C', 'IIM L', 'IIM I', 'IIM K']
time_periods = [2019, 2020, 2021, 2022]
avg_sal_data = np.array([[12, 10, 12, 13], [11, 9, 10, 12], [11, 8, 9, 10], [10, 7, 8, 9], [9, 8, 7, 8], [8, 7, 8, 9]])
# argsort of the negated salaries gives college indices from highest to lowest;
# inverting that permutation yields each college's rank (1 = highest salary)
order = np.argsort(-avg_sal_data, axis=0)
rank_data = np.zeros_like(avg_sal_data)
for i in range(len(time_periods)):
    rank_data[order[:, i], i] = np.arange(1, len(college_categories) + 1)
rank_data
# map ranks 1..6 to bar heights 6..1 (7 - rank) so rank 1 is the tallest bar, matching the y-tick labels set below
df = pd.DataFrame(7 - rank_data.T, columns=[str(i) for i in college_categories])
df['Time'] = time_periods
fig, ax = plt.subplots()
df.plot(x='Time', kind='bar', stacked=False, ax=ax)
ax.set_xlabel('Year', fontsize=15, fontweight='bold')
ax.set_ylabel('Rank', fontsize=15, fontweight='bold')
ax.set_title('Bar Chart', fontsize=20, fontweight='bold')
ax.set_xticklabels(df.Time, rotation=0)
ax.set_yticklabels(['', 6, 5, 4, 3, 2, 1])
plt.show() | code |
73062593/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
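# -1 marks rows that have not yet been assigned to a validation fold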
df_train.shape | code |
73062593/cell_6 | [
"text_plain_output_1.png"
] | from sklearn import model_selection
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
df_train.shape
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
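# record, for every row, the fold in which it serves as validation data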
for fold, (train_indicies, valid_indicies) in enumerate(kf.split(X=df_train)):
df_train.loc[valid_indicies, 'kfold'] = fold
print(f'fold = {fold} <train_indicies = {train_indicies}> <valid_indicies = {valid_indicies}>')
print(len(train_indicies), len(valid_indicies)) | code |
73062593/cell_7 | [
"text_html_output_1.png"
] | from sklearn import model_selection
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
df_train.shape
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_indicies, valid_indicies) in enumerate(kf.split(X=df_train)):
df_train.loc[valid_indicies, 'kfold'] = fold
df_train | code |
73062593/cell_8 | [
"image_output_1.png"
] | from sklearn import model_selection
import matplotlib.pyplot as plt
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
df_train.shape
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_indicies, valid_indicies) in enumerate(kf.split(X=df_train)):
df_train.loc[valid_indicies, 'kfold'] = fold
f, ax = plt.subplots(1, 5, figsize=(30, 5))
for i in range(5):
df_train[df_train.kfold == i].target.hist(ax=ax[i]) | code |
73062593/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
df_train.head() | code |
73062593/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_train['kfold'] = -1
df_train.shape
df_train.target.hist() | code |
130007285/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
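# note: grouping by the value column averages the *other* numeric columns within each group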
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df.dropna()
df['Ticket Price'].mean() | code |
130007285/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.head() | code |
130007285/cell_23 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df.dropna()
max_value = np.max(df['Ticket Price'])
max_value
min_value = np.min(df['Ticket Price'])
min_value | code |
130007285/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df.dropna()
df['Ticket Price'].sum() | code |
130007285/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df.dropna()
df.describe() | code |
130007285/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130007285/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
filtered_df = df[df['Delay Minutes'] > 60]
filtered_df.head() | code |
130007285/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df.dropna() | code |