path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
50211059/cell_24 | [
"text_html_output_1.png"
] | from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr()
df1 = df.T
df1
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='hamming')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='euclidean')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='manhattan')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='minkowski')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim | code |
50211059/cell_14 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['username'] = df['username'].astype(str)
df['username'] = le.fit_transform(df['username'])
df['status'] = df['status'].astype(str)
df['status'] = le.fit_transform(df['status'])
df['sex'] = df['sex'].astype(str)
df['sex'] = le.fit_transform(df['sex'])
df['orientation'] = df['orientation'].astype(str)
df['orientation'] = le.fit_transform(df['orientation'])
df['drinks'] = df['drinks'].astype(str)
df['drinks'] = le.fit_transform(df['drinks'])
df['drugs'] = df['drugs'].astype(str)
df['drugs'] = le.fit_transform(df['drugs'])
df['location'] = df['location'].astype(str)
df['location'] = le.fit_transform(df['location'])
df['pets'] = df['pets'].astype(str)
df['pets'] = le.fit_transform(df['pets'])
df['smokes'] = df['smokes'].astype(str)
df['smokes'] = le.fit_transform(df['smokes'])
df['language'] = df['language'].astype(str)
df['language'] = le.fit_transform(df['language'])
df['new_languages'] = df['new_languages'].astype(str)
df['new_languages'] = le.fit_transform(df['new_languages'])
df['body_profile'] = df['body_profile'].astype(str)
df['body_profile'] = le.fit_transform(df['body_profile'])
df['bio'] = df['bio'].astype(str)
df['bio'] = le.fit_transform(df['bio'])
df['interests'] = df['interests'].astype(str)
df['interests'] = le.fit_transform(df['interests'])
df['other_interests'] = df['other_interests'].astype(str)
df['other_interests'] = le.fit_transform(df['other_interests'])
df['location_preference'] = df['location_preference'].astype(str)
df['location_preference'] = le.fit_transform(df['location_preference'])
df['job'] = df['job'].astype(str)
df['job'] = le.fit_transform(df['job'])
df['dropped_out'] = df['dropped_out'].astype(str)
df['dropped_out'] = le.fit_transform(df['dropped_out'])
df | code |
50211059/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr()
df1 = df.T
df1
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='hamming')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='euclidean')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='manhattan')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim | code |
50211059/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df['user_id'] = df['user_id'].apply(lambda x: x.split('fffe')[1])
df | code |
50211059/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data | code |
17099534/cell_13 | [
"text_plain_output_1.png"
] | from tensorflow.keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import os
import tensorflow as tf
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=32, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=20, class_mode='categorical', target_size=(image_width, image_height))
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
from tensorflow.keras import Model
first_layer = vgg16.get_layer(index=0)
last_layer = vgg16.get_layer(index=-1)
vgg16_partical = Model(inputs=first_layer.input, outputs=last_layer.output)
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
model_using_vgg16 = tf.keras.models.Sequential([vgg16_partical, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary()
model_using_vgg16.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model_using_vgg16.fit_generator(train_generator, validation_data=test_generator, epochs=15)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss') | code |
17099534/cell_9 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from tensorflow.keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
from tensorflow.keras import Model
first_layer = vgg16.get_layer(index=0)
last_layer = vgg16.get_layer(index=-1)
print(last_layer.output_shape)
vgg16_partical = Model(inputs=first_layer.input, outputs=last_layer.output) | code |
17099534/cell_4 | [
"text_plain_output_1.png"
] | import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
print(os.listdir(train_dir)) | code |
17099534/cell_7 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=32, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=20, class_mode='categorical', target_size=(image_width, image_height)) | code |
17099534/cell_8 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary() | code |
17099534/cell_3 | [
"text_plain_output_1.png"
] | import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
list_files('../input') | code |
17099534/cell_10 | [
"text_plain_output_1.png"
] | from tensorflow.keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
from tensorflow.keras import Model
first_layer = vgg16.get_layer(index=0)
last_layer = vgg16.get_layer(index=-1)
vgg16_partical = Model(inputs=first_layer.input, outputs=last_layer.output)
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
model_using_vgg16 = tf.keras.models.Sequential([vgg16_partical, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary() | code |
17099534/cell_12 | [
"text_plain_output_1.png"
] | from tensorflow.keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import tensorflow as tf
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=32, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=20, class_mode='categorical', target_size=(image_width, image_height))
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
from tensorflow.keras import Model
first_layer = vgg16.get_layer(index=0)
last_layer = vgg16.get_layer(index=-1)
vgg16_partical = Model(inputs=first_layer.input, outputs=last_layer.output)
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
model_using_vgg16 = tf.keras.models.Sequential([vgg16_partical, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary()
model_using_vgg16.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model_using_vgg16.fit_generator(train_generator, validation_data=test_generator, epochs=15) | code |
17099534/cell_5 | [
"text_plain_output_1.png"
] | import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
print(os.listdir(test_dir)) | code |
16120091/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
"""
now for the structures
"""
structures.columns
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
train['scalar_coupling_constant'].plot.hist(bins=35) | code |
16120091/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
"""
now for the structures
"""
structures.columns
print(structures['molecule_name'].nunique())
print(structures['atom_index'].nunique())
print(structures['atom'].nunique())
print('x y z :')
print(structures['x'].nunique())
print(structures['y'].nunique())
print(structures['z'].nunique()) | code |
16120091/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
print(train['id'].nunique())
print(train['molecule_name'].nunique())
print(train['id'].nunique() - train.shape[0]) | code |
16120091/cell_19 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
"""
now for the structures
"""
structures.columns
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
byAtom = all_data.groupby('atom')['scalar_coupling_constant'].agg('mean')
print(byAtom)
'\nthis is weird, the F atom is very different from all others.\n' | code |
16120091/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
print(os.listdir('../input')) | code |
16120091/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
Fig, axarr = plt.subplots(2, 2, figsize=(12, 8))
Plt1 = train['atom_index_0'].plot.hist(ax=axarr[0][0])
Plt2 = train['atom_index_1'].plot.hist(ax=axarr[0][1])
Plt3 = train['type'].value_counts().plot.hist(ax=axarr[1][0])
Plt3 = train['scalar_coupling_constant'].plot.hist(ax=axarr[1][1], bins=50) | code |
16120091/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
"""
now for the structures
"""
structures.columns
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
byType = train.groupby('type')['scalar_coupling_constant'].agg('median')
print(byType) | code |
16120091/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
"""
now for the structures
"""
structures.columns | code |
16120091/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
Fig, axarr= plt.subplots(2, 2, figsize=(12,8)) # define a grid with 2 rows, 2 columns - four
#plots all in all inside axarr
Plt1 = train["atom_index_0"].plot.hist(ax = axarr[0][0])
Plt2 = train["atom_index_1"].plot.hist(ax = axarr[0][1])
Plt3 = train["type"].value_counts().plot.hist(ax = axarr[1][0])
Plt3 = train["scalar_coupling_constant"].plot.hist(ax = axarr[1][1], bins =50)
"""
now for the structures
"""
structures.columns
Fig, axarr= plt.subplots(2, 2, figsize=(12,8)) # define a grid with 2 rows, 2 columns - four
#plots all in all inside axarr
Plt1 = structures["molecule_name"].value_counts().plot.hist(ax = axarr[0][0], bins=50)
Plt2 = structures["atom_index"].plot.hist(ax = axarr[0][1])
Plt3 = structures["atom"].value_counts().plot.hist(ax = axarr[1][1])
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
"""
there is a huge peak around the 0 of this constant.
but I do not think these are na values or outliers.
let's now check if the train and test appear to be from the same distribution or not.
"""
structures_train = structures.loc[structures['molecule_name'].isin(train['molecule_name'])]
structures_test = structures.loc[structures['molecule_name'].isin(test['molecule_name'])]
"""
so !!
let's see what I can check..
# train: id, molecule_name, atom_index_0, atom_index_1, type, scalar_coupling_constant
# test : id, molecule_name, atom_index_0, atom_index_1, type
# structures_train: 'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'
# structures_test: 'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'
"""
'\n1. type - is there roughly the same type distribution in both ?\n2. atom - are there roughly the same atom distribution in both ?\n3. x ?\n4. y ?\n5. z ?\n'
Fig, axarr = plt.subplots(1, 2, figsize=(12, 8))
Plt1 = train['type'].value_counts().plot.hist(ax=axarr[0])
Plt2 = test['type'].value_counts().plot.hist(ax=axarr[1]) | code |
16120091/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
Fig, axarr= plt.subplots(2, 2, figsize=(12,8)) # define a grid with 2 rows, 2 columns - four
#plots all in all inside axarr
Plt1 = train["atom_index_0"].plot.hist(ax = axarr[0][0])
Plt2 = train["atom_index_1"].plot.hist(ax = axarr[0][1])
Plt3 = train["type"].value_counts().plot.hist(ax = axarr[1][0])
Plt3 = train["scalar_coupling_constant"].plot.hist(ax = axarr[1][1], bins =50)
"""
now for the structures
"""
structures.columns
Fig, axarr= plt.subplots(2, 2, figsize=(12,8)) # define a grid with 2 rows, 2 columns - four
#plots all in all inside axarr
Plt1 = structures["molecule_name"].value_counts().plot.hist(ax = axarr[0][0], bins=50)
Plt2 = structures["atom_index"].plot.hist(ax = axarr[0][1])
Plt3 = structures["atom"].value_counts().plot.hist(ax = axarr[1][1])
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
"""
there is a huge peak around the 0 of this constant.
but I do not think these are na values or outliers.
let's now check if the train and test appear to be from the same distribution or not.
"""
structures_train = structures.loc[structures['molecule_name'].isin(train['molecule_name'])]
structures_test = structures.loc[structures['molecule_name'].isin(test['molecule_name'])]
"""
so !!
let's see what I can check..
# train: id, molecule_name, atom_index_0, atom_index_1, type, scalar_coupling_constant
# test : id, molecule_name, atom_index_0, atom_index_1, type
# structures_train: 'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'
# structures_test: 'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'
"""
"""
1. type - is there roughly the same type distribution in both ?
2. atom - are there roughly the same atom distribution in both ?
3. x ?
4. y ?
5. z ?
"""
#1 type - is there roughly the same type distrebution in both ?
Fig, axarr= plt.subplots(1, 2, figsize=(12,8))
Plt1 = train["type"].value_counts().plot.hist(ax = axarr[0])
Plt2 = test["type"].value_counts().plot.hist(ax = axarr[1])
Fig, axarr = plt.subplots(1, 2, figsize=(12, 8))
Plt1 = structures_train['atom'].value_counts().plot.hist(ax=axarr[0])
Plt2 = structures_test['atom'].value_counts().plot.hist(ax=axarr[1]) | code |
16120091/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
print(train.isna().sum().sum())
print(structures.isna().sum().sum()) | code |
16120091/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
"""
now for the structures
"""
structures.columns
"""
looking for outliers.
soo..
we are dealing with:
'molecule_name', 'atom_index', 'atom', 'x', 'y', 'z' - Structures.
'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type','scalar_coupling_constant'
"""
sample_data = train.sample(500)
all_data = pd.merge(sample_data, structures, how='left', on='molecule_name')
"""
there is a huge peak around the 0 of this constant.
but I do not think these are na values or outliers.
let's now check if the train and test appear to be from the same distribution or not.
"""
print(test.columns)
structures_train = structures.loc[structures['molecule_name'].isin(train['molecule_name'])]
structures_test = structures.loc[structures['molecule_name'].isin(test['molecule_name'])]
print(structures_train.shape)
print(structures_test.shape) | code |
16120091/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns
Fig, axarr= plt.subplots(2, 2, figsize=(12,8)) # define a grid with 2 rows, 2 columns - four
#plots all in all inside axarr
Plt1 = train["atom_index_0"].plot.hist(ax = axarr[0][0])
Plt2 = train["atom_index_1"].plot.hist(ax = axarr[0][1])
Plt3 = train["type"].value_counts().plot.hist(ax = axarr[1][0])
Plt3 = train["scalar_coupling_constant"].plot.hist(ax = axarr[1][1], bins =50)
"""
now for the structures
"""
structures.columns
Fig, axarr = plt.subplots(2, 2, figsize=(12, 8))
Plt1 = structures['molecule_name'].value_counts().plot.hist(ax=axarr[0][0], bins=50)
Plt2 = structures['atom_index'].plot.hist(ax=axarr[0][1])
Plt3 = structures['atom'].value_counts().plot.hist(ax=axarr[1][1]) | code |
16120091/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
"""
In this notebook I will see what I can do - EDA-wise without the additional data.
I do that because it is not available for us in the test,
maybe we can regenerate them - but for now, the basic data is what I want to focus on.
"""
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
train.columns | code |
1007811/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | X_train.info() | code |
1007811/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | y_train = X_train['Survived'].copy()
X_train.drop('Survived', axis=1, inplace=True)
print(y_train.head())
print(X_train.info()) | code |
1007811/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
X = pd.read_csv('../input/train.csv', index_col=0)
y = X_train['Survived'].copy()
X.drop('Survived', axis=1, inplace=True)
print(X.head())
print(y.head()) | code |
1007811/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1007811/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.pipeline import Pipeline
pipe = Pipeline([()]) | code |
1007811/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | X_train.head() | code |
2041588/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys()
top_states = state_without_null[(state_without_null.RegionName == 'NewYork') | (state_without_null.RegionName == 'Washington') | (state_without_null.RegionName == 'Connecticut') | (state_without_null.RegionName == 'Maryland') | (state_without_null.RegionName == 'NewJersey') | (state_without_null.RegionName == 'Alaska') | (state_without_null.RegionName == 'Massachusetts') | (state_without_null.RegionName == 'California') | (state_without_null.RegionName == 'Hawaii') | (state_without_null.RegionName == 'DistrictofColumbia')]
pd.pivot_table(top_states, index='Date', columns='RegionName', values='ZHVIPerSqft_AllHomes').head(2) | code |
2041588/cell_9 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
plt.subplots(figsize=(14, 6))
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
plt.title('Top States with Most Home Values According to Zillow', fontsize=25)
plt.xlabel('States')
plt.ylabel('dollar amount') | code |
2041588/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys() | code |
2041588/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv') | code |
2041588/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys()
top_states = state_without_null[(state_without_null.RegionName == 'NewYork') | (state_without_null.RegionName == 'Washington') | (state_without_null.RegionName == 'Connecticut') | (state_without_null.RegionName == 'Maryland') | (state_without_null.RegionName == 'NewJersey') | (state_without_null.RegionName == 'Alaska') | (state_without_null.RegionName == 'Massachusetts') | (state_without_null.RegionName == 'California') | (state_without_null.RegionName == 'Hawaii') | (state_without_null.RegionName == 'DistrictofColumbia')]
pd.pivot_table(state_without_null, index='Date', columns='RegionName', values='ZHVIPerSqft_AllHomes')
city_time_series.Date = pd.to_datetime(city_time_series.Date)
city_time_series.columns | code |
2041588/cell_15 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys()
top_states = state_without_null[(state_without_null.RegionName == 'NewYork') | (state_without_null.RegionName == 'Washington') | (state_without_null.RegionName == 'Connecticut') | (state_without_null.RegionName == 'Maryland') | (state_without_null.RegionName == 'NewJersey') | (state_without_null.RegionName == 'Alaska') | (state_without_null.RegionName == 'Massachusetts') | (state_without_null.RegionName == 'California') | (state_without_null.RegionName == 'Hawaii') | (state_without_null.RegionName == 'DistrictofColumbia')]
pd.pivot_table(state_without_null, index='Date', columns='RegionName', values='ZHVIPerSqft_AllHomes') | code |
2041588/cell_17 | [
"text_html_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys()
top_states = state_without_null[(state_without_null.RegionName == 'NewYork') | (state_without_null.RegionName == 'Washington') | (state_without_null.RegionName == 'Connecticut') | (state_without_null.RegionName == 'Maryland') | (state_without_null.RegionName == 'NewJersey') | (state_without_null.RegionName == 'Alaska') | (state_without_null.RegionName == 'Massachusetts') | (state_without_null.RegionName == 'California') | (state_without_null.RegionName == 'Hawaii') | (state_without_null.RegionName == 'DistrictofColumbia')]
pd.pivot_table(state_without_null, index='Date', columns='RegionName', values='ZHVIPerSqft_AllHomes')
city_time_series.Date = pd.to_datetime(city_time_series.Date)
city_time_series.head() | code |
2041588/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).keys()
top_states = state_without_null[(state_without_null.RegionName == 'NewYork') | (state_without_null.RegionName == 'Washington') | (state_without_null.RegionName == 'Connecticut') | (state_without_null.RegionName == 'Maryland') | (state_without_null.RegionName == 'NewJersey') | (state_without_null.RegionName == 'Alaska') | (state_without_null.RegionName == 'Massachusetts') | (state_without_null.RegionName == 'California') | (state_without_null.RegionName == 'Hawaii') | (state_without_null.RegionName == 'DistrictofColumbia')]
pd.pivot_table(top_states, index='Date', columns='RegionName', values='ZHVIPerSqft_AllHomes').plot(kind='line', figsize=(20, 15), legend=True)
plt.xlabel('Year', fontsize=15)
plt.ylabel('per sqft', fontsize=15)
plt.title('Changes in price over time for top ten expensive states', fontsize=24) | code |
2041588/cell_10 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cities_crosswalk = pd.read_csv('../input/cities_crosswalk.csv')
city_time_series = pd.read_csv('../input/City_time_series.csv')
county_time_series = pd.read_csv('../input/County_time_series.csv')
metro_time_series = pd.read_csv('../input/Metro_time_series.csv')
neighborhood_time_series = pd.read_csv('../input/Neighborhood_time_series.csv')
state_time_series = pd.read_csv('../input/State_time_series.csv')
zip_time_series = pd.read_csv('../input/Zip_time_series.csv')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZHVI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
state_without_null.groupby(state_without_null.RegionName)['ZHVI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
state_time_series.Date = pd.to_datetime(state_time_series.Date)
state_without_null = state_time_series.dropna(subset=['ZRI_AllHomes'], how='any')
state_without_null.Date = state_without_null.Date.dt.year
plt.subplots(figsize=(14, 6))
state_without_null.groupby(state_without_null.RegionName)['ZRI_AllHomes'].mean().sort_values().tail(10).plot(kind='bar')
plt.title('Top States with Most Rent Values According to Zillow', fontsize=25)
plt.xlabel('States')
plt.ylabel('dollar amount') | code |
2041588/cell_5 | [
"text_html_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight') | code |
32071028/cell_9 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertForQuestionAnswering
from wasabi import msg
import json
import os
import pandas as pd
import time
import torch
import pandas as pd
import json
import os
meta_path = '/kaggle/input/CORD-19-research-challenge/metadata.csv'
def get_data_texts():
meta_data = pd.read_csv(meta_path, low_memory=True)
paperID2year = {}
for _, meta_row in meta_data.iterrows():
if pd.notnull(meta_row['pmcid']):
paperID2year[meta_row['pmcid']] = meta_row['publish_time']
if pd.notnull(meta_row['sha']):
paper_ids = meta_row['sha'].split('; ')
for paper_id in paper_ids:
paperID2year[paper_id] = meta_row['publish_time']
data_text = {}
index2paperID = {}
index2paperPath = {}
i = 0
for dirname, _, filenames in os.walk('/kaggle/input/CORD-19-research-challenge'):
for filename in filenames:
paper_path = os.path.join(dirname, filename)
if paper_path[-4:] != 'json':
continue
with open(paper_path) as json_file:
article_data = json.load(json_file)
paper_date = paperID2year.get(article_data['paper_id'], None)
if paper_date:
if paper_date[0:4] == '2020':
data_text[article_data['paper_id']] = ' '.join([d['text'] for d in article_data['body_text']])
index2paperID[i] = article_data['paper_id']
index2paperPath[i] = paper_path
i += 1
return (data_text, index2paperID, index2paperPath)
data_text, index2paperID, index2paperPath = get_data_texts()
from transformers import BertTokenizer, BertForQuestionAnswering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import torch
from wasabi import msg
import time
class QuestionCovid:
def __init__(self, TOKENIZER, MODEL, index2paperID, index2paperPath):
self.TOKENIZER = TOKENIZER
self.MODEL = MODEL
self.index2paperID = index2paperID
self.index2paperPath = index2paperPath
def fit(self, data_text):
self.TFIDF_VECTORIZER = TfidfVectorizer()
with msg.loading(' Fitting TFIDF'):
start = time.time()
self.TFIDF_VECTORIZER.fit(data_text.values())
msg.good(' TFIDF fitted - Took {:.2f}s'.format(time.time() - start))
with msg.loading(' Creating Articles matrix'):
start = time.time()
self.ARTICLES_MATRIX = self.TFIDF_VECTORIZER.transform(data_text.values())
msg.good(' Article matrix created - Took {:.2f}s'.format(time.time() - start))
def get_answer(self, text, question):
input_text = '[CLS] ' + question + ' [SEP] ' + text + ' [SEP]'
input_ids = self.TOKENIZER.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = self.MODEL(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = self.TOKENIZER.convert_ids_to_tokens(input_ids)
answer = ' '.join(all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1])
score = round(start_scores.max().item(), 2)
return (answer, score)
def predict(self, question):
query = self.TFIDF_VECTORIZER.transform([question + ' covid'])
best_matches = sorted([(i, c) for i, c in enumerate(cosine_similarity(query, self.ARTICLES_MATRIX).ravel())], key=lambda x: x[1], reverse=True)
for i, tfidf_score in best_matches[:5]:
best_score = 0
best_answer = 'No answer'
best_text = 'No snippet'
paper_path = self.index2paperPath[i]
with open(paper_path) as json_file:
article_data = json.load(json_file)
text = ' '.join([d['text'] for d in article_data['body_text']])
sentences = text.split('.')
n = 3
sentences_grouped = ['.'.join(sentences[i:i + n]) for i in range(0, len(sentences), n)]
for subtext in sentences_grouped:
answer, score = self.get_answer(subtext, question)
if score > best_score:
best_score = score
best_answer = answer
best_text = subtext
yield (self.index2paperID[i], best_answer, best_score, best_text, tfidf_score)
TOKENIZER = BertTokenizer.from_pretrained('bert-base-uncased')
MODEL = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
covid_q = QuestionCovid(TOKENIZER, MODEL, index2paperID, index2paperPath)
covid_q.fit(data_text) | code |
32071028/cell_8 | [
"text_plain_output_1.png"
] | from transformers import BertTokenizer, BertForQuestionAnswering
TOKENIZER = BertTokenizer.from_pretrained('bert-base-uncased')
MODEL = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad') | code |
32071028/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertForQuestionAnswering
from wasabi import msg
import json
import os
import pandas as pd
import time
import torch
import pandas as pd
import json
import os
meta_path = '/kaggle/input/CORD-19-research-challenge/metadata.csv'
def get_data_texts():
meta_data = pd.read_csv(meta_path, low_memory=True)
paperID2year = {}
for _, meta_row in meta_data.iterrows():
if pd.notnull(meta_row['pmcid']):
paperID2year[meta_row['pmcid']] = meta_row['publish_time']
if pd.notnull(meta_row['sha']):
paper_ids = meta_row['sha'].split('; ')
for paper_id in paper_ids:
paperID2year[paper_id] = meta_row['publish_time']
data_text = {}
index2paperID = {}
index2paperPath = {}
i = 0
for dirname, _, filenames in os.walk('/kaggle/input/CORD-19-research-challenge'):
for filename in filenames:
paper_path = os.path.join(dirname, filename)
if paper_path[-4:] != 'json':
continue
with open(paper_path) as json_file:
article_data = json.load(json_file)
paper_date = paperID2year.get(article_data['paper_id'], None)
if paper_date:
if paper_date[0:4] == '2020':
data_text[article_data['paper_id']] = ' '.join([d['text'] for d in article_data['body_text']])
index2paperID[i] = article_data['paper_id']
index2paperPath[i] = paper_path
i += 1
return (data_text, index2paperID, index2paperPath)
data_text, index2paperID, index2paperPath = get_data_texts()
from transformers import BertTokenizer, BertForQuestionAnswering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import torch
from wasabi import msg
import time
class QuestionCovid:
def __init__(self, TOKENIZER, MODEL, index2paperID, index2paperPath):
self.TOKENIZER = TOKENIZER
self.MODEL = MODEL
self.index2paperID = index2paperID
self.index2paperPath = index2paperPath
def fit(self, data_text):
self.TFIDF_VECTORIZER = TfidfVectorizer()
with msg.loading(' Fitting TFIDF'):
start = time.time()
self.TFIDF_VECTORIZER.fit(data_text.values())
msg.good(' TFIDF fitted - Took {:.2f}s'.format(time.time() - start))
with msg.loading(' Creating Articles matrix'):
start = time.time()
self.ARTICLES_MATRIX = self.TFIDF_VECTORIZER.transform(data_text.values())
msg.good(' Article matrix created - Took {:.2f}s'.format(time.time() - start))
def get_answer(self, text, question):
input_text = '[CLS] ' + question + ' [SEP] ' + text + ' [SEP]'
input_ids = self.TOKENIZER.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = self.MODEL(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = self.TOKENIZER.convert_ids_to_tokens(input_ids)
answer = ' '.join(all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1])
score = round(start_scores.max().item(), 2)
return (answer, score)
def predict(self, question):
query = self.TFIDF_VECTORIZER.transform([question + ' covid'])
best_matches = sorted([(i, c) for i, c in enumerate(cosine_similarity(query, self.ARTICLES_MATRIX).ravel())], key=lambda x: x[1], reverse=True)
for i, tfidf_score in best_matches[:5]:
best_score = 0
best_answer = 'No answer'
best_text = 'No snippet'
paper_path = self.index2paperPath[i]
with open(paper_path) as json_file:
article_data = json.load(json_file)
text = ' '.join([d['text'] for d in article_data['body_text']])
sentences = text.split('.')
n = 3
sentences_grouped = ['.'.join(sentences[i:i + n]) for i in range(0, len(sentences), n)]
for subtext in sentences_grouped:
answer, score = self.get_answer(subtext, question)
if score > best_score:
best_score = score
best_answer = answer
best_text = subtext
yield (self.index2paperID[i], best_answer, best_score, best_text, tfidf_score)
TOKENIZER = BertTokenizer.from_pretrained('bert-base-uncased')
MODEL = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
covid_q = QuestionCovid(TOKENIZER, MODEL, index2paperID, index2paperPath)
covid_q.fit(data_text)
challenge_tasks = [{'task': 'What is known about transmission, incubation, and environmental stability?', 'questions': ['Is the virus transmitted by aerosol, droplets, food, close contact, fecal matter, or water?', 'How long is the incubation period for the virus?', 'Can the virus be transmitted asymptomatically or during the incubation period?', 'How does weather, heat, and humidity affect the tramsmission of 2019-nCoV?', 'How long can the 2019-nCoV virus remain viable on common surfaces?']}, {'task': 'What do we know about COVID-19 risk factors?', 'questions': ['What risk factors contribute to the severity of 2019-nCoV?', 'How does hypertension affect patients?', 'How does heart disease affect patients?', 'How does copd affect patients?', 'How does smoking affect patients?', 'How does pregnancy affect patients?', 'What is the fatality rate of 2019-nCoV?', 'What public health policies prevent or control the spread of 2019-nCoV?']}, {'task': 'What do we know about virus genetics, origin, and evolution?', 'questions': ['Can animals transmit 2019-nCoV?', 'What animal did 2019-nCoV come from?', 'What real-time genomic tracking tools exist?', 'What geographic variations are there in the genome of 2019-nCoV?', 'What effors are being done in asia to prevent further outbreaks?']}, {'task': 'What do we know about vaccines and therapeutics?', 'questions': ['What drugs or therapies are being investigated?', 'Are anti-inflammatory drugs recommended?']}, {'task': 'What do we know about non-pharmaceutical interventions?', 'questions': ['Which non-pharmaceutical interventions limit tramsission?', 'What are most important barriers to compliance?']}, {'task': 'What has been published about medical care?', 'questions': ['How does extracorporeal membrane oxygenation affect 2019-nCoV patients?', 'What telemedicine and cybercare methods are most effective?', 'How is artificial intelligence being used in real time health delivery?', 'What adjunctive or supportive methods can help patients?']}, {'task': 'What do we know about diagnostics and surveillance?', 'questions': ['What diagnostic tests (tools) exist or are being developed to detect 2019-nCoV?']}, {'task': 'Other interesting questions', 'questions': ['What is the immune system response to 2019-nCoV?', 'Can personal protective equipment prevent the transmission of 2019-nCoV?', 'Can 2019-nCoV infect patients a second time?']}]
question = 'How long is the incubation period for the virus?'
with open('/kaggle/working/answers.jsonl', 'w') as f:
for task_id, task in enumerate(challenge_tasks):
task_question = task['task']
msg.text(f'Task {task_id}: {task_question}')
questions = task['questions']
for question_id, question in enumerate(questions):
with msg.loading(f'Answering question: {question}'):
start = time.time()
for i, (paper_id, answer, score, snippet, tfidf_score) in enumerate(covid_q.predict(question)):
chunk = json.dumps({'task_id': task_id, 'task': task_question, 'question_id': question_id, 'question': question, 'paper_id': paper_id, 'answer': answer, 'snippet': snippet, 'bert_score': score, 'tfidf_score': tfidf_score})
f.write(chunk + '\n')
msg.text('\n')
msg.text(f'Answer {i}: {answer}')
time_elapsed = time.time() - start
msg.good(f'Question {question_id} answered - Took {time_elapsed}s') | code |
32071028/cell_14 | [
"text_plain_output_5.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertForQuestionAnswering
from wasabi import msg
import json
import os
import pandas as pd
import time
import torch
import pandas as pd
import json
import os
meta_path = '/kaggle/input/CORD-19-research-challenge/metadata.csv'
def get_data_texts():
meta_data = pd.read_csv(meta_path, low_memory=True)
paperID2year = {}
for _, meta_row in meta_data.iterrows():
if pd.notnull(meta_row['pmcid']):
paperID2year[meta_row['pmcid']] = meta_row['publish_time']
if pd.notnull(meta_row['sha']):
paper_ids = meta_row['sha'].split('; ')
for paper_id in paper_ids:
paperID2year[paper_id] = meta_row['publish_time']
data_text = {}
index2paperID = {}
index2paperPath = {}
i = 0
for dirname, _, filenames in os.walk('/kaggle/input/CORD-19-research-challenge'):
for filename in filenames:
paper_path = os.path.join(dirname, filename)
if paper_path[-4:] != 'json':
continue
with open(paper_path) as json_file:
article_data = json.load(json_file)
paper_date = paperID2year.get(article_data['paper_id'], None)
if paper_date:
if paper_date[0:4] == '2020':
data_text[article_data['paper_id']] = ' '.join([d['text'] for d in article_data['body_text']])
index2paperID[i] = article_data['paper_id']
index2paperPath[i] = paper_path
i += 1
return (data_text, index2paperID, index2paperPath)
data_text, index2paperID, index2paperPath = get_data_texts()
from transformers import BertTokenizer, BertForQuestionAnswering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import torch
from wasabi import msg
import time
class QuestionCovid:
def __init__(self, TOKENIZER, MODEL, index2paperID, index2paperPath):
self.TOKENIZER = TOKENIZER
self.MODEL = MODEL
self.index2paperID = index2paperID
self.index2paperPath = index2paperPath
def fit(self, data_text):
self.TFIDF_VECTORIZER = TfidfVectorizer()
with msg.loading(' Fitting TFIDF'):
start = time.time()
self.TFIDF_VECTORIZER.fit(data_text.values())
msg.good(' TFIDF fitted - Took {:.2f}s'.format(time.time() - start))
with msg.loading(' Creating Articles matrix'):
start = time.time()
self.ARTICLES_MATRIX = self.TFIDF_VECTORIZER.transform(data_text.values())
msg.good(' Article matrix created - Took {:.2f}s'.format(time.time() - start))
def get_answer(self, text, question):
input_text = '[CLS] ' + question + ' [SEP] ' + text + ' [SEP]'
input_ids = self.TOKENIZER.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = self.MODEL(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = self.TOKENIZER.convert_ids_to_tokens(input_ids)
answer = ' '.join(all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1])
score = round(start_scores.max().item(), 2)
return (answer, score)
def predict(self, question):
query = self.TFIDF_VECTORIZER.transform([question + ' covid'])
best_matches = sorted([(i, c) for i, c in enumerate(cosine_similarity(query, self.ARTICLES_MATRIX).ravel())], key=lambda x: x[1], reverse=True)
for i, tfidf_score in best_matches[:5]:
best_score = 0
best_answer = 'No answer'
best_text = 'No snippet'
paper_path = self.index2paperPath[i]
with open(paper_path) as json_file:
article_data = json.load(json_file)
text = ' '.join([d['text'] for d in article_data['body_text']])
sentences = text.split('.')
n = 3
sentences_grouped = ['.'.join(sentences[i:i + n]) for i in range(0, len(sentences), n)]
for subtext in sentences_grouped:
answer, score = self.get_answer(subtext, question)
if score > best_score:
best_score = score
best_answer = answer
best_text = subtext
yield (self.index2paperID[i], best_answer, best_score, best_text, tfidf_score)
TOKENIZER = BertTokenizer.from_pretrained('bert-base-uncased')
MODEL = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
covid_q = QuestionCovid(TOKENIZER, MODEL, index2paperID, index2paperPath)
covid_q.fit(data_text)
question = 'How long is the incubation period for the virus?'
for i, (paper_id, answer, score, snippet, tfidf_score) in enumerate(covid_q.predict(question)):
print(f'Answer {i}: {answer}')
print(f'Text segment: {snippet}')
print(f'Paper id: {paper_id}') | code |
122264416/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
Aquifer_Petrignano.dropna(subset=['Depth_to_Groundwater_P25'], inplace=True)
Lake_Bilancino.dropna(subset=['Flow_Rate'], inplace=True)
Aquifer_Petrignano.describe() | code |
122264416/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1) | code |
122264416/cell_26 | [
"text_html_output_1.png"
] | from datetime import datetime, date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
def fillna_from_list(column, list_):
"""Function get column and list for replace missing values"""
list_for_df = []
i = -1
for el in column:
if el == -1000000:
i += 1
list_for_df.append(list_.iloc[i + 1])
else:
list_for_df.append(el)
return list_for_df
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
Aquifer_Petrignano.dropna(subset=['Depth_to_Groundwater_P25'], inplace=True)
Lake_Bilancino.dropna(subset=['Flow_Rate'], inplace=True)
# To complete the data, as a naive method, we will use ffill
f, ax = plt.subplots(nrows=7, ncols=1, figsize=(20, 50))
for i, column in enumerate(Aquifer_Petrignano.drop('Date', axis=1).columns):
sns.lineplot(x=Aquifer_Petrignano['Date'], y=Aquifer_Petrignano[column], ax=ax[i], color='dodgerblue')
ax[i].set_title('Feature: {}'.format(column), fontsize=14)
ax[i].set_ylabel(ylabel=column, fontsize=14)
ax[i].set_xlim([date(2006, 1, 1), date(2020, 6, 30)])
Aquifer_Petrignano.drop(['Depth_to_Groundwater_P24', 'Temperature_Petrignano'], axis=1, inplace=True)
print('Max date from Aquifer Temperature:', max(Aquifer_Petrignano[Aquifer_Petrignano['Temperature_Bastia_Umbra'].isna() == False]['Date']), '\nMin date from Temperature:', min(Aquifer_Petrignano[Aquifer_Petrignano['Temperature_Bastia_Umbra'].isna() == False]['Date']))
print('Count days from Aquifer Temperature:', int((max(Aquifer_Petrignano[Aquifer_Petrignano['Temperature_Bastia_Umbra'].isna() == False]['Date']) - min(Aquifer_Petrignano[Aquifer_Petrignano['Temperature_Bastia_Umbra'].isna() == False]['Date'])) / np.timedelta64(1, 'D'))) | code |
122264416/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano.info() | code |
122264416/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
print('Max date from Aquifer Petrignano:', max(Aquifer_Petrignano.Date), '\nMin date from Aquifer Petrignano:', min(Aquifer_Petrignano.Date))
print('Count days from Aquifer Petrignano:', int((max(Aquifer_Petrignano.Date) - min(Aquifer_Petrignano.Date)) / np.timedelta64(1, 'D')))
print('Length of data from Aquifer Petrignano:', len(Aquifer_Petrignano))
print('\nMax date from Lake Bilancino:', max(Lake_Bilancino.Date), '\nMin date from Lake Bilancino:', min(Lake_Bilancino.Date))
print('Count days from Lake Bilancino:', int((max(Lake_Bilancino.Date) - min(Lake_Bilancino.Date)) / np.timedelta64(1, 'D')))
print('Length of data from Lake Bilancino:', len(Lake_Bilancino)) | code |
122264416/cell_8 | [
"image_output_1.png"
] | import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Lake_Bilancino.info() | code |
122264416/cell_15 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
msno.heatmap(Aquifer_Petrignano) | code |
122264416/cell_16 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
msno.heatmap(Lake_Bilancino) | code |
122264416/cell_24 | [
"text_html_output_1.png"
] | from datetime import datetime, date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
def fillna_from_list(column, list_):
"""Function get column and list for replace missing values"""
list_for_df = []
i = -1
for el in column:
if el == -1000000:
i += 1
list_for_df.append(list_.iloc[i + 1])
else:
list_for_df.append(el)
return list_for_df
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
Aquifer_Petrignano.dropna(subset=['Depth_to_Groundwater_P25'], inplace=True)
Lake_Bilancino.dropna(subset=['Flow_Rate'], inplace=True)
f, ax = plt.subplots(nrows=7, ncols=1, figsize=(20, 50))
for i, column in enumerate(Aquifer_Petrignano.drop('Date', axis=1).columns):
sns.lineplot(x=Aquifer_Petrignano['Date'], y=Aquifer_Petrignano[column], ax=ax[i], color='dodgerblue')
ax[i].set_title('Feature: {}'.format(column), fontsize=14)
ax[i].set_ylabel(ylabel=column, fontsize=14)
ax[i].set_xlim([date(2006, 1, 1), date(2020, 6, 30)]) | code |
122264416/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'], format='%d/%m/%Y')
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'], format='%d/%m/%Y')
pd.concat([pd.Series(Aquifer_Petrignano.isnull().sum()).rename_axis('Aquifer_Petrignano_features/target').to_frame('Missing Value Count').reset_index(), pd.Series(Lake_Bilancino.isnull().sum()).rename_axis('Lake_Bilancino_features/target').to_frame('Missing Value Count').reset_index()], axis=1)
Aquifer_Petrignano.dropna(subset=['Depth_to_Groundwater_P25'], inplace=True)
Lake_Bilancino.dropna(subset=['Flow_Rate'], inplace=True)
Lake_Bilancino.describe() | code |
122264416/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
Aquifer_Petrignano_input = pd.read_csv('../input/acea-water-prediction/Aquifer_Petrignano.csv')
Lake_Bilancino_input = pd.read_csv('../input/acea-water-prediction/Lake_Bilancino.csv')
Aquifer_Petrignano = Aquifer_Petrignano_input.copy()
Lake_Bilancino = Lake_Bilancino_input.copy()
Aquifer_Petrignano.head() | code |
72063152/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
print('X_train data shape: ', X_train_scaled.shape)
print('X_val data shape: ', X_val_scaled.shape)
print('y_train shape: ', y_train.shape)
print('y_val shape: ', y_val.shape) | code |
72063152/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
sub | code |
72063152/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
target_pred_lr = lr.predict(X_test_scaled)
target_pred_lr | code |
72063152/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.head() | code |
72063152/cell_23 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
print('RMSE for Linear Regression Model: ', np.sqrt(mse(y_val, y_preds_lr))) | code |
72063152/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
sub_lr = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
target_pred_ridge = ridge.predict(X_test_scaled)
sub_ridge = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
sub_ridge['target'] = target_pred_ridge
sub_ridge.to_csv('sub_ridge.csv', index=False)
sub_ridge.head() | code |
72063152/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
test.head() | code |
72063152/cell_40 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(X_train_scaled, y_train)
y_preds_tree = tree_reg.predict(X_val_scaled)
for_reg = RandomForestRegressor()
for_reg.fit(X_train_scaled, y_train)
y_preds_for = for_reg.predict(X_val_scaled)
target_pred_for = for_reg.predict(X_test_scaled)
target_pred_for | code |
72063152/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
print('RMSE of Ridge Regression: ', np.sqrt(mse(y_val, y_preds_ridge))) | code |
72063152/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(X_train_scaled, y_train)
y_preds_tree = tree_reg.predict(X_val_scaled)
for_reg = RandomForestRegressor()
for_reg.fit(X_train_scaled, y_train)
y_preds_for = for_reg.predict(X_val_scaled)
print('RMSE for Random Forest Regressor: ', np.sqrt(mse(y_val, y_preds_for))) | code |
72063152/cell_41 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
sub_lr = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
sub_ridge = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
tree_reg = DecisionTreeRegressor()
tree_reg.fit(X_train_scaled, y_train)
y_preds_tree = tree_reg.predict(X_val_scaled)
for_reg = RandomForestRegressor()
for_reg.fit(X_train_scaled, y_train)
y_preds_for = for_reg.predict(X_val_scaled)
target_pred_for = for_reg.predict(X_test_scaled)
target_pred_for
sub_for = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
sub_for['target'] = target_pred_for
sub_for.head() | code |
72063152/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
test.info() | code |
72063152/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
X_test.info() | code |
72063152/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
X_train.head() | code |
72063152/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
X_train.info() | code |
72063152/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
X_test.head() | code |
72063152/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(X_train_scaled, y_train)
y_preds_tree = tree_reg.predict(X_val_scaled)
print('RMSE for Decision Tree Regressor: ', np.sqrt(mse(y_val, y_preds_tree))) | code |
72063152/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
target_pred_ridge = ridge.predict(X_test_scaled)
target_pred_ridge | code |
72063152/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
print('Training Data Shape: ', train.shape)
print('Testing Data Shape: ', test.shape) | code |
72063152/cell_27 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
target_pred_lr = lr.predict(X_test_scaled)
sub_lr = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
sub_lr['target'] = target_pred_lr
sub_lr.to_csv('sub_lr.csv', index=False)
sub_lr.head() | code |
72063152/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.info() | code |
72063152/cell_36 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train = train.drop('id', axis=1)
test = test.drop('id', axis=1)
cols = test.columns
X_train = train[cols]
X_test = test.copy()
y = train['target']
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.fit_transform(X_test)
lr = LinearRegression()
lr.fit(X_train_scaled, y_train)
y_preds_lr = lr.predict(X_val_scaled)
ridge = Ridge()
ridge.fit(X_train_scaled, y_train)
y_preds_ridge = ridge.predict(X_val_scaled)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(X_train_scaled, y_train)
y_preds_tree = tree_reg.predict(X_val_scaled)
y_preds_tree | code |
128012943/cell_42 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
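# drop='first' removes one dummy column per categorical feature to avoid redundant (collinear) indicator columns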
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_train_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_train_new
x_test_new = pd.concat([x_test, x_testfeatures.set_axis(x_test.index)], axis=1)
x_test_new
x_test_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_test_new
from sklearn.ensemble import RandomForestRegressor
RFR_model = RandomForestRegressor(n_estimators=100, criterion='squared_error', random_state=1, n_jobs=-1)
RFR_model.fit(x_train_new, y_train)
y_pred_RFR = RFR_model.predict(x_test_new)
from sklearn import metrics
from sklearn.tree import DecisionTreeRegressor
DT_regressor = DecisionTreeRegressor(max_depth=10)
DT_regressor.fit(x_train_new, y_train)
y_pred_DT = DT_regressor.predict(x_test_new)
print(metrics.r2_score(y_test, y_pred_DT)) | code |
128012943/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
ax = sns.lmplot(x='age', y='expenses', data=df_insure, hue='smoker', palette='Set1') | code |
128012943/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
sns.displot(data=df_insure['expenses'])
plt.show() | code |
128012943/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure['smoker'].value_counts() | code |
128012943/cell_25 | [
"image_output_1.png"
] | x_train.head() | code |
128012943/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.head(5) | code |
128012943/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new | code |
128012943/cell_30 | [
"image_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_train_array | code |
128012943/cell_33 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures | code |
128012943/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.groupby('children')['expenses'].sum().plot(kind='bar')
plt.ylabel('Insurance charges')
plt.show() | code |
128012943/cell_40 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_train_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_train_new
x_test_new = pd.concat([x_test, x_testfeatures.set_axis(x_test.index)], axis=1)
x_test_new
x_test_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_test_new
from sklearn.linear_model import LinearRegression
LR_model = LinearRegression()
LR_model.fit(x_train_new, y_train)
y_pred = LR_model.predict(x_test_new)
r2_score(y_test, y_pred) | code |
128012943/cell_26 | [
"image_output_1.png"
] | x_train.shape | code |
128012943/cell_41 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures
x_testfeatures = pd.DataFrame(x_test_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_testfeatures
x_train_new = pd.concat([x_train, x_trainfeatures.set_axis(x_train.index)], axis=1)
x_train_new
x_train_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_train_new
x_test_new = pd.concat([x_test, x_testfeatures.set_axis(x_test.index)], axis=1)
x_test_new
x_test_new.drop(['sex', 'smoker', 'region'], axis=1, inplace=True)
x_test_new
from sklearn.ensemble import RandomForestRegressor
RFR_model = RandomForestRegressor(n_estimators=100, criterion='squared_error', random_state=1, n_jobs=-1)
RFR_model.fit(x_train_new, y_train)
y_pred_RFR = RFR_model.predict(x_test_new)
from sklearn import metrics
print(metrics.r2_score(y_test, y_pred_RFR)) | code |
128012943/cell_2 | [
"image_output_1.png"
] | import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
import warnings
warnings.filterwarnings('ignore') | code |
128012943/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.groupby('region')['expenses'].sum().plot(kind='bar')
plt.ylabel('Insurance charges')
plt.show() | code |
128012943/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128012943/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.describe() | code |
128012943/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
df_insure.groupby('region')['smoker'].count().plot(kind='bar')
plt.ylabel('No. of smokers')
plt.show() | code |
128012943/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_insure = pd.read_csv('/kaggle/input/medical-insurance/med-insurance.csv')
x_train.shape
x_test.shape
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop='first')
x_train_array = ohe.fit_transform(x_train[['sex', 'smoker', 'region']]).toarray()
x_test_array = ohe.transform(x_test[['sex', 'smoker', 'region']]).toarray()
x_trainfeatures = pd.DataFrame(x_train_array, columns=['male', 'smokes', 'northwest', 'southeast', 'southwest'])
x_trainfeatures | code |