path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (string, 1 class: "code")
---|---|---|---
2017333/cell_17 | [
"text_html_output_1.png"
] | from sklearn import svm
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train['Age'] = train['Age'].fillna(np.mean(train['Age']))
train['Fare'] = train['Fare'].fillna(np.mean(train['Fare']))
from sklearn.model_selection import train_test_split
X = train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
Y = train['Survived']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8, random_state=0)  # same split as cell_10
clf = svm.SVC()
clf.fit(X_train, Y_train)
clf.score(X_train, Y_train)
clf.score(X_test, Y_test)  # score the held-out split; the original refit the model on it by mistake
test['Sex'] = test['Sex'].apply(lambda x: 1 if x == 'male' else 0)
test['Age'] = test['Age'].fillna(np.mean(test['Age']))
test['Fare'] = test['Fare'].fillna(np.mean(test['Fare']))
test = test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
results = clf.predict(test)
results | code |
2017333/cell_14 | [
"text_html_output_1.png"
] | from sklearn import svm
clf = svm.SVC()
clf.fit(X_train, Y_train)  # X_train/Y_train come from the train_test_split in cell_10
clf.score(X_train, Y_train)
clf.score(X_test, Y_test) | code |
2017333/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train = train[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
X = train.drop('Survived', axis=1)
Y = train['Survived']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8, random_state=0) | code |
2017333/cell_12 | [
"text_plain_output_1.png"
] | from sklearn import svm
clf = svm.SVC()
clf.fit(X_train, Y_train) | code |
2017333/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train.head() | code |
2017393/cell_13 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
sns.set(color_codes=True)
visitors = reserves['reserve_visitors']
sns.set(color_codes=True)
visitors = visits['visitors']
sns.distplot(visitors, color='y') | code |
2017393/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
reserves.info()
reserves.describe() | code |
2017393/cell_25 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Number of unique areas = ', str(len(air_store_info['air_area_name'].unique()))) | code |
2017393/cell_4 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno # check for missing values
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
msno.matrix(air_reserve)
msno.matrix(hpg_reserve)
msno.matrix(visits)
msno.matrix(air_store_info)
msno.matrix(hpg_store_info) | code |
2017393/cell_23 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_store_info.info()
air_store_info.head() | code |
2017393/cell_30 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Number of unique areas = ', str(len(hpg_store_info['hpg_area_name'].unique()))) | code |
2017393/cell_20 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Number of Air restaurants = ', str(len(air_store_info)))
print('Number of hpg restaurants = ', str(len(hpg_store_info))) | code |
2017393/cell_29 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Genres:')
hpg_store_info['hpg_genre_name'].unique() | code |
2017393/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_store_info['air_area_name'].unique() | code |
2017393/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
sns.set(color_codes=True)
visitors = reserves['reserve_visitors']
sns.distplot(visitors) | code |
2017393/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve.head() | code |
2017393/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
dates.head() | code |
2017393/cell_28 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
hpg_store_info.info()
hpg_store_info.head() | code |
2017393/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
hpg_reserve.head() | code |
2017393/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Number of Air restaurants = ', str(len(visits['air_store_id'].unique()))) | code |
2017393/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
relation.info() | code |
2017393/cell_31 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
hpg_store_info['hpg_area_name'].unique() | code |
2017393/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
print('Genres:')
air_store_info['air_genre_name'].unique() | code |
2017393/cell_14 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
visits.info()
visits.describe() | code |
2017393/cell_10 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
print('Number of restaurants with reservations from AirREGI = ', str(len(air_reserve['store_id'].unique())))
print('Number of restaurants with reservations from hpg = ', str(len(hpg_reserve['store_id'].unique()))) | code |
74054476/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
plt.scatter(X_train.iloc[:20], y_train.iloc[:20], color='Red')
plt.title('Training Data')
plt.xlabel('x')
plt.ylabel('y')
plt.show() | code |
74054476/cell_19 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(X_test, y_test, color='Red')
plt.plot(X_test, y_pred, color='Blue')
plt.title('Hypothesis over testing dataset')
plt.xlabel('X')
plt.ylabel('y')
plt.show() | code |
74054476/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
74054476/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(X_train, y_train, color='Red')
plt.plot(X_train, regressor.predict(X_train), color='Blue')
plt.title('Hypothesis over training data')
plt.xlabel('X')
plt.ylabel('y')
plt.show() | code |
74054476/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
df_test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
df_train.head() | code |
74054476/cell_12 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
regressor = LinearRegression()
regressor.fit(X_train, y_train) | code |
74054476/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
df_test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
print(df_train.isnull().sum())
print(df_test.isnull().sum())
df_train.dropna(inplace=True)
df_test.dropna(inplace=True) | code |
49122760/cell_21 | [
"text_plain_output_1.png"
] | from keras import layers
from keras import models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
proj_dir = '../input/cassava-leaf-disease-classification/train_images/'
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train.loc[:, 'label'] = train.loc[:, 'label'].astype('str')
BATCH_SIZE = 64
SPLIT = 0.2
conv_base = tf.keras.models.load_model('../input/pretrained-models/vgg16')
conv_base.summary()
train_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True)
test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen_v2.flow_from_dataframe(train, directory=proj_dir, x_col='image_id', y_col='label', target_size=(448, 448), batch_size=BATCH_SIZE, class_mode='categorical')
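# streams 448x448 images from train_images in batches; class_mode='categorical' one-hot encodes the string labels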
test_dir = '../input/cassava-leaf-disease-classification/test_images/'
test = pd.DataFrame()
test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/')
test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False)
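# batch_size=1 and shuffle=False keep predictions aligned with test_generator.filenames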
train_generator.class_indices
with tf.device('/GPU:0'):
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:  # unfreeze VGG16 from block5_conv1 onward, keep earlier layers frozen
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
from keras import optimizers
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2), optimizer='Adamax', metrics=['acc'])
history = model.fit_generator(train_generator, epochs=15, steps_per_epoch=len(train) // BATCH_SIZE)
test_generator.reset()
pred = model.predict_generator(test_generator, verbose=1, steps=len(test))
predicted_class_indices = np.argmax(pred, axis=1)
labels = train_generator.class_indices
labels = dict(((v, k) for k, v in labels.items()))
predictions = [labels[k] for k in predicted_class_indices]
filenames = test_generator.filenames
results = pd.DataFrame({'image_id': filenames, 'label': predictions}) | code |
49122760/cell_20 | [
"text_plain_output_1.png"
] | from keras import layers
from keras import models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
proj_dir = '../input/cassava-leaf-disease-classification/train_images/'
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train.loc[:, 'label'] = train.loc[:, 'label'].astype('str')
BATCH_SIZE = 64
SPLIT = 0.2
conv_base = tf.keras.models.load_model('../input/pretrained-models/vgg16')
conv_base.summary()
train_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True)
test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen_v2.flow_from_dataframe(train, directory=proj_dir, x_col='image_id', y_col='label', target_size=(448, 448), batch_size=BATCH_SIZE, class_mode='categorical')
test_dir = '../input/cassava-leaf-disease-classification/test_images/'
test = pd.DataFrame()
test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/')
test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False)
train_generator.class_indices
with tf.device('/GPU:0'):
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:  # unfreeze VGG16 from block5_conv1 onward, keep earlier layers frozen
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
from keras import optimizers
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2), optimizer='Adamax', metrics=['acc'])
history = model.fit_generator(train_generator, epochs=15, steps_per_epoch=len(train) // BATCH_SIZE) | code |
49122760/cell_19 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
proj_dir = '../input/cassava-leaf-disease-classification/train_images/'
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train.loc[:, 'label'] = train.loc[:, 'label'].astype('str')
BATCH_SIZE = 64
SPLIT = 0.2
train_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True)
test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen_v2.flow_from_dataframe(train, directory=proj_dir, x_col='image_id', y_col='label', target_size=(448, 448), batch_size=BATCH_SIZE, class_mode='categorical')
test_dir = '../input/cassava-leaf-disease-classification/test_images/'
test = pd.DataFrame()
test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/')
test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False)
train_generator.class_indices | code |
49122760/cell_18 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
proj_dir = '../input/cassava-leaf-disease-classification/train_images/'
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train.loc[:, 'label'] = train.loc[:, 'label'].astype('str')
BATCH_SIZE = 64
SPLIT = 0.2
train_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True)
test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen_v2.flow_from_dataframe(train, directory=proj_dir, x_col='image_id', y_col='label', target_size=(448, 448), batch_size=BATCH_SIZE, class_mode='categorical')
test_dir = '../input/cassava-leaf-disease-classification/test_images/'
test = pd.DataFrame()
test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/')
test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False) | code |
49122760/cell_5 | [
"text_plain_output_1.png"
] | import tensorflow as tf
conv_base = tf.keras.models.load_model('../input/pretrained-models/vgg16')
conv_base.summary() | code |
33122543/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import datetime
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
name = 'Raghu'
started_at = datetime.now().strftime('%H:%M:%S')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
def massage_data(data):
    data['Honorific'] = data['Name']
    titles = data['Name'].str.split(',')
    for indx, title in enumerate(titles):
        data.loc[indx, 'Honorific'] = title[1].split('.')[0]  # .loc avoids chained-assignment warnings
    data['HonorificEnc'] = data['Honorific']
    for indx, hon in enumerate(train_data.Honorific.value_counts().index):
        data.HonorificEnc.replace(hon, indx, inplace=True)
    data['CoPassengers'] = data['SibSp'] + data['Parch']
    data.Sex.replace('male', 0, inplace=True)
    data.Sex.replace('female', 1, inplace=True)
    most_likely_fare = data['Fare'].mean()
    data['Fare'] = data['Fare'].fillna(most_likely_fare)
    most_likely_age = data['Age'].mean()
    data['Age'] = data['Age'].fillna(most_likely_age)
    most_likely_embarkation = data['Embarked'].mode()[0]  # mode() returns a Series; take its first value
    data['Embarked'] = data['Embarked'].fillna(most_likely_embarkation)
    data['EmbarkedEnc'] = data['Embarked']
    data.EmbarkedEnc.replace('S', 0, inplace=True)
    data.EmbarkedEnc.replace('C', 1, inplace=True)
    data.EmbarkedEnc.replace('Q', 2, inplace=True)
    return data
test_data = massage_data(test_data)
train_data = massage_data(train_data)
columns_for_fitting = ['Sex', 'CoPassengers', 'Pclass', 'Fare', 'Age', 'EmbarkedEnc']
data = [train_data, test_data]
X = train_data[columns_for_fitting]
y = train_data['Survived']
X1 = test_data[columns_for_fitting]
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state=21)
k = 4
while k < 5:
    k = k + 1
    scaler = preprocessing.StandardScaler().fit(X_train.astype(float))  # fit the scaler on the training split only
    X_train = scaler.transform(X_train.astype(float))
    model = KNeighborsClassifier(n_neighbors=k).fit(X_train, Y_train)
    X_test = scaler.transform(X_test.astype(float))  # reuse the train-fitted scaler; do not refit on test
    predictions = model.predict(X_test)
    print('K and Accuracy are', k, accuracy_score(Y_test, predictions)) | code |
33122543/cell_6 | [
"text_plain_output_1.png"
] | from datetime import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
name = 'Raghu'
started_at = datetime.now().strftime('%H:%M:%S')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
def massage_data(data):
    data['Honorific'] = data['Name']
    titles = data['Name'].str.split(',')
    for indx, title in enumerate(titles):
        data.loc[indx, 'Honorific'] = title[1].split('.')[0]  # .loc avoids chained-assignment warnings
    data['HonorificEnc'] = data['Honorific']
    for indx, hon in enumerate(train_data.Honorific.value_counts().index):
        data.HonorificEnc.replace(hon, indx, inplace=True)
    data['CoPassengers'] = data['SibSp'] + data['Parch']
    data.Sex.replace('male', 0, inplace=True)
    data.Sex.replace('female', 1, inplace=True)
    most_likely_fare = data['Fare'].mean()
    data['Fare'] = data['Fare'].fillna(most_likely_fare)
    most_likely_age = data['Age'].mean()
    data['Age'] = data['Age'].fillna(most_likely_age)
    most_likely_embarkation = data['Embarked'].mode()[0]  # mode() returns a Series; take its first value
    data['Embarked'] = data['Embarked'].fillna(most_likely_embarkation)
    data['EmbarkedEnc'] = data['Embarked']
    data.EmbarkedEnc.replace('S', 0, inplace=True)
    data.EmbarkedEnc.replace('C', 1, inplace=True)
    data.EmbarkedEnc.replace('Q', 2, inplace=True)
    return data
test_data = massage_data(test_data)
train_data = massage_data(train_data)
columns_for_fitting = ['Sex', 'CoPassengers', 'Pclass', 'Fare', 'Age', 'EmbarkedEnc']
data = [train_data, test_data]
for c in columns_for_fitting:
    print(c)
    for d in data:
        print(d[c].value_counts())
    print('-----') | code |
33122543/cell_2 | [
"text_plain_output_1.png"
] | from datetime import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
name = 'Raghu'
started_at = datetime.now().strftime('%H:%M:%S')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
print(train_data.head())
print(train_data.columns) | code |
33122543/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
33122543/cell_8 | [
"text_plain_output_1.png"
] | from datetime import datetime
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
name = 'Raghu'
started_at = datetime.now().strftime('%H:%M:%S')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
def massage_data(data):
    data['Honorific'] = data['Name']
    titles = data['Name'].str.split(',')
    for indx, title in enumerate(titles):
        data.loc[indx, 'Honorific'] = title[1].split('.')[0]  # .loc avoids chained-assignment warnings
    data['HonorificEnc'] = data['Honorific']
    for indx, hon in enumerate(train_data.Honorific.value_counts().index):
        data.HonorificEnc.replace(hon, indx, inplace=True)
    data['CoPassengers'] = data['SibSp'] + data['Parch']
    data.Sex.replace('male', 0, inplace=True)
    data.Sex.replace('female', 1, inplace=True)
    most_likely_fare = data['Fare'].mean()
    data['Fare'] = data['Fare'].fillna(most_likely_fare)
    most_likely_age = data['Age'].mean()
    data['Age'] = data['Age'].fillna(most_likely_age)
    most_likely_embarkation = data['Embarked'].mode()[0]  # mode() returns a Series; take its first value
    data['Embarked'] = data['Embarked'].fillna(most_likely_embarkation)
    data['EmbarkedEnc'] = data['Embarked']
    data.EmbarkedEnc.replace('S', 0, inplace=True)
    data.EmbarkedEnc.replace('C', 1, inplace=True)
    data.EmbarkedEnc.replace('Q', 2, inplace=True)
    return data
test_data = massage_data(test_data)
train_data = massage_data(train_data)
columns_for_fitting = ['Sex', 'CoPassengers', 'Pclass', 'Fare', 'Age', 'EmbarkedEnc']
data = [train_data, test_data]
X = train_data[columns_for_fitting]
y = train_data['Survived']
X1 = test_data[columns_for_fitting]
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state=21)
print('Train set:', X_train.shape, Y_train.shape)
print('Test set:', X_test.shape, Y_test.shape) | code |
33122543/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from datetime import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
name = 'Raghu'
started_at = datetime.now().strftime('%H:%M:%S')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
def massage_data(data):
    data['Honorific'] = data['Name']
    titles = data['Name'].str.split(',')
    for indx, title in enumerate(titles):
        data.loc[indx, 'Honorific'] = title[1].split('.')[0]  # .loc avoids chained-assignment warnings
    data['HonorificEnc'] = data['Honorific']
    for indx, hon in enumerate(train_data.Honorific.value_counts().index):
        data.HonorificEnc.replace(hon, indx, inplace=True)
    data['CoPassengers'] = data['SibSp'] + data['Parch']
    data.Sex.replace('male', 0, inplace=True)
    data.Sex.replace('female', 1, inplace=True)
    most_likely_fare = data['Fare'].mean()
    data['Fare'] = data['Fare'].fillna(most_likely_fare)
    most_likely_age = data['Age'].mean()
    data['Age'] = data['Age'].fillna(most_likely_age)
    most_likely_embarkation = data['Embarked'].mode()[0]  # mode() returns a Series; take its first value
    data['Embarked'] = data['Embarked'].fillna(most_likely_embarkation)
    data['EmbarkedEnc'] = data['Embarked']
    data.EmbarkedEnc.replace('S', 0, inplace=True)
    data.EmbarkedEnc.replace('C', 1, inplace=True)
    data.EmbarkedEnc.replace('Q', 2, inplace=True)
    return data
test_data = massage_data(test_data)
train_data = massage_data(train_data)
print(train_data.head()) | code |
122263777/cell_6 | [
"text_html_output_1.png"
] | !pip install recipe-scrapers # Installing scraping lib | code |
122263777/cell_19 | [
"text_plain_output_1.png"
] | import requests  # needed for requests.get below (missing from the original imports)
import time  # needed for time.sleep below
from bs4 import BeautifulSoup
from recipe_scrapers import scrape_me
import pandas as pd
import re
URLs = ['https://www.allrecipes.com/recipes/721/world-cuisine/european/french/', 'https://www.allrecipes.com/recipes/16126/world-cuisine/european/french/french-bread/', 'https://www.allrecipes.com/recipes/17138/world-cuisine/european/french/main-dishes/', 'https://www.allrecipes.com/recipes/1857/world-cuisine/european/french/main-dishes/pork/', 'https://www.allrecipes.com/recipes/1858/world-cuisine/european/french/main-dishes/chicken/', 'https://www.allrecipes.com/recipes/1828/world-cuisine/european/french/desserts/', 'https://www.allrecipes.com/recipes/1829/world-cuisine/european/french/soups-and-stews/', 'https://www.allrecipes.com/recipes/1848/world-cuisine/european/french/appetizers/']
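# French-cuisine category pages on allrecipes.com that are crawled below for individual recipe links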
recipes_urls = {}
for u in URLs:
    lst_var = []
    url = u
    res = requests.get(url).text
    soup = BeautifulSoup(res, 'html.parser')
    name_ = soup.find_all('h1', class_='comp mntl-taxonomysc-heading mntl-text-block')
    try:
        for i in range(len(soup.find_all('a', class_='comp card--image-top mntl-card-list-items mntl-document-card mntl-card card card--no-image'))):
            url = soup.find_all('a', class_='comp card--image-top mntl-card-list-items mntl-document-card mntl-card card card--no-image')[i]['href']
            lst_var.append(url)
    except:
        pass
    for i in range(len(soup.find_all('a', class_='comp mntl-card-list-items mntl-document-card mntl-card card card--no-image'))):
        url = soup.find_all('a', class_='comp mntl-card-list-items mntl-document-card mntl-card card card--no-image')[i]['href']
        lst_var.append(url)
    recipes_urls[name_[0].text[1:]] = lst_var
len(recipes_urls)
recipes = {}
i = 0
for name, url_list in recipes_urls.items():
    temp_dict = {}
    for url in url_list:
        recipe = {}
        scraper = scrape_me(url)
        recipe["Continent"] = "Europe"
        recipe["Country_State"] = "France"
        try:
            recipe["cuisine"] = name
        except:
            recipe["cuisine"] = float("nan")
        try:
            recipe["title"] = scraper.title()
        except:
            recipe["title"] = float("nan")
        try:
            recipe["URL"] = url
        except:
            recipe["URL"] = float("nan")
        try:
            recipe["rating"] = scraper.ratings()
        except:
            recipe["rating"] = float("nan")
        try:
            recipe["total_time"] = scraper.total_time()
        except:
            recipe["total_time"] = float("nan")
        try:
            recipe["prep_time"] = scraper.prep_time()
        except:
            recipe["prep_time"] = float("nan")
        try:
            recipe["cook_time"] = scraper.cook_time()
        except:
            recipe["cook_time"] = float("nan")
        try:
            recipe["description"] = scraper.description()
        except:
            recipe["description"] = float("nan")
        try:
            recipe["ingredients"] = scraper.ingredients()
        except:
            recipe["ingredients"] = float("nan")
        try:
            recipe["instructions"] = scraper.instructions_list()
        except:
            recipe["instructions"] = float("nan")
        try:
            recipe["nutrients"] = scraper.nutrients()
        except:
            recipe["nutrients"] = float("nan")
        try:
            recipe["serves"] = scraper.yields()
        except:
            recipe["serves"] = float("nan")
        try:
            res = requests.get(url).text
            soup = BeautifulSoup(res, "html.parser")
            rate = soup.find("div", class_="comp type--squirrel mntl-recipe-review-bar__rating-count mntl-text-block")
            pattern = r'[0-9,]+'
            s = rate.text
            match = re.search(pattern, s)
            if match:
                number = int(match.group().replace(',', ''))
                recipe["rating_count"] = number
        except:
            recipe["rating_count"] = 0
        time.sleep(1)
        recipes[f"{i}"] = recipe
        i += 1
df_France = pd.DataFrame()
for data in recipes.keys():
    df_France = df_France.append(recipes.get(data), ignore_index=True)
df_France.head(5) | code |
122263777/cell_12 | [
"text_plain_output_1.png"
] | import requests  # needed for requests.get below (missing from the original imports)
from bs4 import BeautifulSoup
URLs = ['https://www.allrecipes.com/recipes/721/world-cuisine/european/french/', 'https://www.allrecipes.com/recipes/16126/world-cuisine/european/french/french-bread/', 'https://www.allrecipes.com/recipes/17138/world-cuisine/european/french/main-dishes/', 'https://www.allrecipes.com/recipes/1857/world-cuisine/european/french/main-dishes/pork/', 'https://www.allrecipes.com/recipes/1858/world-cuisine/european/french/main-dishes/chicken/', 'https://www.allrecipes.com/recipes/1828/world-cuisine/european/french/desserts/', 'https://www.allrecipes.com/recipes/1829/world-cuisine/european/french/soups-and-stews/', 'https://www.allrecipes.com/recipes/1848/world-cuisine/european/french/appetizers/']
recipes_urls = {}
for u in URLs:
    lst_var = []
    url = u
    res = requests.get(url).text
    soup = BeautifulSoup(res, 'html.parser')
    name_ = soup.find_all('h1', class_='comp mntl-taxonomysc-heading mntl-text-block')
    try:
        for i in range(len(soup.find_all('a', class_='comp card--image-top mntl-card-list-items mntl-document-card mntl-card card card--no-image'))):
            url = soup.find_all('a', class_='comp card--image-top mntl-card-list-items mntl-document-card mntl-card card card--no-image')[i]['href']
            lst_var.append(url)
    except:
        pass
    for i in range(len(soup.find_all('a', class_='comp mntl-card-list-items mntl-document-card mntl-card card card--no-image'))):
        url = soup.find_all('a', class_='comp mntl-card-list-items mntl-document-card mntl-card card card--no-image')[i]['href']
        lst_var.append(url)
    recipes_urls[name_[0].text[1:]] = lst_var
len(recipes_urls) | code |
72081028/cell_13 | [
"text_plain_output_1.png"
] | from numpy import array, argmax, random, take
import matplotlib.pyplot as plt
import pandas as pd
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
eng_l = []
deu_l = []
for i in deu_eng[:, 0]:
    eng_l.append(len(i.split()))
for i in deu_eng[:, 1]:
    deu_l.append(len(i.split()))
length_df = pd.DataFrame({'eng': eng_l, 'deu': deu_l})
length_df.hist(bins=30)
plt.show() | code |
72081028/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from numpy import array, argmax, random, take
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
deu_eng | code |
72081028/cell_34 | [
"image_output_1.png"
] | from numpy import array, argmax, random, take
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, LSTM, Embedding, Bidirectional, RepeatVector, TimeDistributed
from keras.models import Sequential
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8
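# both languages are padded/truncated to 8 tokens so every batch has a fixed shape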
def encode_sequences(tokenizer, length, lines):
    seq = tokenizer.texts_to_sequences(lines)
    seq = pad_sequences(seq, maxlen=length, padding='post')
    return seq
from sklearn.model_selection import train_test_split
train, test = train_test_split(deu_eng, test_size=0.2)  # the split cell is missing from this record; an 80/20 split is assumed
trainX = encode_sequences(deu_tokenizer, deu_length, train[:, 1])
trainY = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
testX = encode_sequences(deu_tokenizer, deu_length, test[:, 1])  # encode the test split (the original reused train here)
testY = encode_sequences(eng_tokenizer, eng_length, test[:, 0])
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
    model = Sequential()
    model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
    model.add(LSTM(units))
    model.add(RepeatVector(out_timesteps))
    model.add(LSTM(units, return_sequences=True))
    model.add(Dense(out_vocab, activation='softmax'))
    return model
model = build_model(deu_vocab_size, eng_vocab_size, deu_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy')
filename = 'model.h1.24_manish'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1), epochs=5, batch_size=512, validation_split=0.2, callbacks=[checkpoint], verbose=1)
model = load_model('model.h1.24_manish')
preds = model.predict_classes(testX.reshape((testX.shape[0], testX.shape[1]))) | code |
72081028/cell_30 | [
"text_plain_output_1.png"
] | from numpy import array, argmax, random, take
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, LSTM, Embedding, Bidirectional, RepeatVector, TimeDistributed
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8
def encode_sequences(tokenizer, length, lines):
    seq = tokenizer.texts_to_sequences(lines)
    seq = pad_sequences(seq, maxlen=length, padding='post')
    return seq
from sklearn.model_selection import train_test_split
train, test = train_test_split(deu_eng, test_size=0.2)  # the split cell is missing from this record; an 80/20 split is assumed
trainX = encode_sequences(deu_tokenizer, deu_length, train[:, 1])
trainY = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
    model = Sequential()
    model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
    model.add(LSTM(units))
    model.add(RepeatVector(out_timesteps))
    model.add(LSTM(units, return_sequences=True))
    model.add(Dense(out_vocab, activation='softmax'))
    return model
model = build_model(deu_vocab_size, eng_vocab_size, deu_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy')
filename = 'model.h1.24_manish'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1), epochs=5, batch_size=512, validation_split=0.2, callbacks=[checkpoint], verbose=1) | code |
72081028/cell_32 | [
"text_plain_output_1.png"
] | from numpy import array, argmax, random, take
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, LSTM, Embedding, Bidirectional, RepeatVector, TimeDistributed
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
import pandas as pd
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
eng_l = []
deu_l = []
for i in deu_eng[:, 0]:
    eng_l.append(len(i.split()))
for i in deu_eng[:, 1]:
    deu_l.append(len(i.split()))
length_df = pd.DataFrame({'eng': eng_l, 'deu': deu_l})
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8
def encode_sequences(tokenizer, length, lines):
    seq = tokenizer.texts_to_sequences(lines)
    seq = pad_sequences(seq, maxlen=length, padding='post')
    return seq
from sklearn.model_selection import train_test_split
train, test = train_test_split(deu_eng, test_size=0.2)  # the split cell is missing from this record; an 80/20 split is assumed
trainX = encode_sequences(deu_tokenizer, deu_length, train[:, 1])
trainY = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
    model = Sequential()
    model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
    model.add(LSTM(units))
    model.add(RepeatVector(out_timesteps))
    model.add(LSTM(units, return_sequences=True))
    model.add(Dense(out_vocab, activation='softmax'))
    return model
model = build_model(deu_vocab_size, eng_vocab_size, deu_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy')
filename = 'model.h1.24_manish'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1), epochs=5, batch_size=512, validation_split=0.2, callbacks=[checkpoint], verbose=1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['train', 'validation'])
plt.show() | code |
72081028/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from numpy import array, argmax, random, take
from keras.preprocessing.text import Tokenizer
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8
print('English Vocabulary Size: %d' % eng_vocab_size) | code |
72081028/cell_17 | [
"text_plain_output_1.png"
] | from numpy import array, argmax, random, take
from keras.preprocessing.text import Tokenizer
def read_text(filename):
    file = open(filename, mode='rt', encoding='utf-8')
    text = file.read()
    file.close()
    return text
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text('../input/german-to-english/deu.txt')
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
deu_eng = deu_eng[:50000, :]
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8
print('Deutch Vocabulary Size: %d' % deu_vocab_size) | code |
2002850/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sparse
books = pd.read_csv('../input/Books.csv', encoding='ISO 8859-1')
users = pd.read_csv('../input/Users.csv', encoding='ISO 8859-1')
book_ratings = pd.read_csv('../input/BookRatings.csv', encoding='ISO 8859-1', low_memory=False)
book_ratings['UserID'] = book_ratings.UserID.astype(int)
grouped_cleaned = book_ratings.groupby(['UserID', 'ISBN']).sum().reset_index()
grouped_cleaned = grouped_cleaned.query('BookRating > 0')
grouped_cleaned.shape
from scipy.sparse.linalg import spsolve
users = list(np.sort(grouped_cleaned.UserID.unique()))
books = list(grouped_cleaned.ISBN.unique())
ratings = list(grouped_cleaned.BookRating)
rows = grouped_cleaned.UserID.astype('category', categories=users).cat.codes
cols = grouped_cleaned.ISBN.astype('category', categories=books).cat.codes
ratings_sparse = sparse.csr_matrix((ratings, (rows, cols)), shape=(len(users), len(books)))
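# sparsity below = percentage of user-book cells in the interaction matrix that hold no rating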
matrix_size = ratings_sparse.shape[0] * ratings_sparse.shape[1]
num_ratings = len(ratings_sparse.nonzero()[0])
sparsity = 100 * (1 - num_ratings / matrix_size)
sparsity | code |
2002850/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
books = pd.read_csv('../input/Books.csv', encoding='ISO 8859-1')
users = pd.read_csv('../input/Users.csv', encoding='ISO 8859-1')
book_ratings = pd.read_csv('../input/BookRatings.csv', encoding='ISO 8859-1', low_memory=False)
book_ratings['UserID'] = book_ratings.UserID.astype(int)
grouped_cleaned = book_ratings.groupby(['UserID', 'ISBN']).sum().reset_index()
grouped_cleaned = grouped_cleaned.query('BookRating > 0')
grouped_cleaned.shape | code |
2002850/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2002850/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sparse
books = pd.read_csv('../input/Books.csv', encoding='ISO 8859-1')
users = pd.read_csv('../input/Users.csv', encoding='ISO 8859-1')
book_ratings = pd.read_csv('../input/BookRatings.csv', encoding='ISO 8859-1', low_memory=False)
book_ratings['UserID'] = book_ratings.UserID.astype(int)
grouped_cleaned = book_ratings.groupby(['UserID', 'ISBN']).sum().reset_index()
grouped_cleaned = grouped_cleaned.query('BookRating > 0')
grouped_cleaned.shape
from scipy.sparse.linalg import spsolve
users = list(np.sort(grouped_cleaned.UserID.unique()))
books = list(grouped_cleaned.ISBN.unique())
ratings = list(grouped_cleaned.BookRating)
rows = grouped_cleaned.UserID.astype('category', categories=users).cat.codes
cols = grouped_cleaned.ISBN.astype('category', categories=books).cat.codes
ratings_sparse = sparse.csr_matrix((ratings, (rows, cols)), shape=(len(users), len(books)))
ratings_sparse | code |
88100945/cell_4 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
TRAIN_PATH = '../input/tabular-playground-series-feb-2022/train.csv'
TEST_PATH = '../input/tabular-playground-series-feb-2022/test.csv'
PSEUDO_PATH = '../input/automl-tps-02-22-flaml-prediction/submission.csv'
ID = 'row_id'
TARGET = 'target'
train = pd.read_csv(TRAIN_PATH)
print('train size = ', len(train))
train.head() | code |
88100945/cell_6 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
TRAIN_PATH = '../input/tabular-playground-series-feb-2022/train.csv'
TEST_PATH = '../input/tabular-playground-series-feb-2022/test.csv'
PSEUDO_PATH = '../input/automl-tps-02-22-flaml-prediction/submission.csv'
ID = 'row_id'
TARGET = 'target'
train = pd.read_csv(TRAIN_PATH)
test = pd.read_csv(TEST_PATH)
pseudo = pd.read_csv(PSEUDO_PATH)
pseudo = pseudo.drop([ID], axis=1)
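# the predicted labels are joined to the test features to form pseudo-labelled training rows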
pseudo_train = pd.concat([test, pseudo], axis=1)
pseudo_train.reset_index(inplace=True, drop=True)
print('pseudo_train size = ', len(pseudo_train))
pseudo_train.head() | code |
88100945/cell_8 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
TRAIN_PATH = '../input/tabular-playground-series-feb-2022/train.csv'
TEST_PATH = '../input/tabular-playground-series-feb-2022/test.csv'
PSEUDO_PATH = '../input/automl-tps-02-22-flaml-prediction/submission.csv'
ID = 'row_id'
TARGET = 'target'
train = pd.read_csv(TRAIN_PATH)
test = pd.read_csv(TEST_PATH)
pseudo = pd.read_csv(PSEUDO_PATH)
pseudo = pseudo.drop([ID], axis=1)
pseudo_train = pd.concat([test, pseudo], axis=1)
pseudo_train.reset_index(inplace=True, drop=True)
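# final training set = original labelled rows plus the pseudo-labelled test rows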
new_train = pd.concat([train, pseudo_train], axis=0, ignore_index=True)
print('new_train size = ', len(new_train))
new_train.head() | code |
18161210/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des.head() | code |
18161210/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print('train:', train.shape, 'test:', test.shape, sep='\n') | code |
18161210/cell_20 | [
"text_html_output_1.png"
] | from sklearn.model_selection import cross_val_score
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des = pd.get_dummies(des)
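# rows 0-32966 of the combined one-hot frame are the original train split; rows 32967-41187 are test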
X_train = des.iloc[0:32967, :]
X_test = des.iloc[32967:41188, :]
classifier = xgboost.XGBClassifier()
classifier = xgboost.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=0.3, gamma=0.4, learning_rate=0.1, max_delta_step=0, max_depth=8, min_child_weight=3, missing=None, n_estimators=10, n_jobs=1, nthread=None, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=None, subsample=1, verbosity=1)
score = cross_val_score(classifier, X_train, y_train, cv=5)
score.mean() | code |
18161210/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des = pd.get_dummies(des)
X_train = des.iloc[0:32967, :]
X_test = des.iloc[32967:41188, :]
X_train.head() | code |
18161210/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18161210/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des = pd.get_dummies(des)
X_train = des.iloc[0:32967, :]
X_test = des.iloc[32967:41188, :]
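# Candidate hyperparameter values; RandomizedSearchCV samples 20 combinations and scores each with 5-fold ROC AUC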
params = {'learning_rate': [0.05, 0.1, 0.15, 0.2, 0.25, 0.3], 'max_depth': [3, 4, 5, 6, 8, 10, 12, 15], 'n_estimators': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'min_child_weight': [1, 3, 5, 7], 'gamma': [0.0, 0.1, 0.2, 0.3, 0.4], 'colsample_bytree': [0.3, 0.4, 0.5, 0.7]}
classifier = xgboost.XGBClassifier()
random_search = RandomizedSearchCV(classifier, param_distributions=params, n_iter=20, scoring='roc_auc', n_jobs=-1, cv=5, verbose=3)
random_search.fit(X_train, y_train) | code |
18161210/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
18161210/cell_17 | [
"text_html_output_1.png"
] | from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des = pd.get_dummies(des)
X_train = des.iloc[0:32967, :]
X_test = des.iloc[32967:41188, :]
params = {'learning_rate': [0.05, 0.1, 0.15, 0.2, 0.25, 0.3], 'max_depth': [3, 4, 5, 6, 8, 10, 12, 15], 'n_estimators': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'min_child_weight': [1, 3, 5, 7], 'gamma': [0.0, 0.1, 0.2, 0.3, 0.4], 'colsample_bytree': [0.3, 0.4, 0.5, 0.7]}
classifier = xgboost.XGBClassifier()
random_search = RandomizedSearchCV(classifier, param_distributions=params, n_iter=20, scoring='roc_auc', n_jobs=-1, cv=5, verbose=3)
random_search.fit(X_train, y_train)
random_search.best_estimator_ | code |
18161210/cell_22 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.y
train = train.iloc[:, 1:15]
test = test.iloc[:, 1:15]
des = pd.concat((train, test))
des = pd.get_dummies(des)
X_train = des.iloc[0:32967, :]
X_test = des.iloc[32967:41188, :]
classifier = xgboost.XGBClassifier()
classifier = xgboost.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=0.3, gamma=0.4, learning_rate=0.1, max_delta_step=0, max_depth=8, min_child_weight=3, missing=None, n_estimators=10, n_jobs=1, nthread=None, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=None, subsample=1, verbosity=1)
classifier.fit(X_train, y_train) | code |
88101554/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape | code |
88101554/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
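# Map raw string ids to contiguous integers (0..n-1), the index format an embedding layer expects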
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
nusers = tempdf.user_id.nunique()
nrests = tempdf.business_id.nunique()
(nusers, nrests)
print('The total number of unique Users :', nusers)
print('The total number of unique Businesses :', nrests) | code |
88101554/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
tempdf.head(3) | code |
88101554/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.head(2) | code |
88101554/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
nusers = tempdf.user_id.nunique()
nrests = tempdf.business_id.nunique()
(nusers, nrests)
tempdf.shape | code |
88101554/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.layers import Activation
from tensorflow.keras import backend as K
from sklearn.model_selection import train_test_split
import tensorflow.keras as keras
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.layers import Input, Embedding, Add, Dot, Flatten
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
import pydot
from tensorflow.keras.utils import model_to_dot
from IPython.display import SVG
from operator import itemgetter
from sklearn.decomposition import PCA | code |
88101554/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts)) | code |
88101554/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape | code |
88101554/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.head() | code |
88101554/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.head(3) | code |
88101554/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
restdf = restdf.drop('business_id', axis=1).set_index('id')
restdf.head() | code |
88101554/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.head() | code |
88101554/cell_31 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
nusers = tempdf.user_id.nunique()
nrests = tempdf.business_id.nunique()
(nusers, nrests)
tempdf.shape
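# Hold out 30% of the review rows for evaluation (train_size=0.7)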
train_indices, test_indices = train_test_split(range(tempdf.shape[0]), train_size=0.7)
trdf = tempdf.iloc[train_indices]
testdf = tempdf.iloc[test_indices]
(trdf.shape, testdf.shape) | code |
88101554/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
nusers = tempdf.user_id.nunique()
nrests = tempdf.business_id.nunique()
(nusers, nrests) | code |
88101554/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
restdf.head() | code |
88101554/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
restdf.shape
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx))
tempdf1 = tempdf.copy()
restdf1 = restdf.copy()
tempdf['business_id'] = tempdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf['user_id'] = tempdf.user_id.apply(lambda x: userid2idx[x])
restdf = restdf[restdf.business_id.isin(restraunts)]
restdf['id'] = restdf.business_id.apply(lambda x: restrauntid2idx[x])
tempdf.reset_index(inplace=True)
tempdf.drop(labels=['index', 'review_id'], axis=1, inplace=True)
tempdf.head() | code |
88101554/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
restdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/restdf.pkl')
restdf = restdf.drop(columns=['address', 'city', 'state', 'postal_code', 'latitude', 'longitude', 'review_count', 'is_open', 'attributes', 'hours'])
tempdf = pd.read_pickle('../input/recommendationsyetemforrestraunts/tempdf.pkl')
tempdf = tempdf.drop(columns=['useful', 'funny', 'cool'])
tempdf.shape
users = tempdf.user_id.unique()
restraunts = tempdf.business_id.unique()
(len(users), len(restraunts))
userid2idx = {o: i for i, o in enumerate(users)}
restrauntid2idx = {o: i for i, o in enumerate(restraunts)}
(len(userid2idx), len(restrauntid2idx)) | code |
89143029/cell_4 | [
"text_html_output_1.png"
] | from pandas import read_csv
from pandas import read_csv
train = read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
sample = read_csv('/kaggle/input/tabular-playground-series-mar-2022/sample_submission.csv')
train.memory_usage().sum() / 1000000
test.memory_usage().sum() / 1000000
sample.memory_usage().sum() / 1000000
train | code |
89143029/cell_3 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from pandas import read_csv
from pandas import read_csv
train = read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
sample = read_csv('/kaggle/input/tabular-playground-series-mar-2022/sample_submission.csv')
train.memory_usage().sum() / 1000000
test.memory_usage().sum() / 1000000
sample.memory_usage().sum() / 1000000 | code |
89143029/cell_5 | [
"text_html_output_1.png"
] | from pandas import read_csv
from pandas import read_csv
train = read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
sample = read_csv('/kaggle/input/tabular-playground-series-mar-2022/sample_submission.csv')
train.memory_usage().sum() / 1000000
test.memory_usage().sum() / 1000000
sample.memory_usage().sum() / 1000000
sample | code |
128009742/cell_13 | [
"text_html_output_1.png"
] | """plt.figure(figsize = (8, 4), dpi = 300)
sns.barplot(data = mae_list.reindex((mae_list).mean().sort_values().index, axis = 1), palette = 'viridis', orient = 'h')
plt.title('MAE Comparison', weight = 'bold', size = 20)
plt.show()
""" | code |
128009742/cell_23 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, NMF
from sklearn.manifold import TSNE
from umap import UMAP
from scipy.cluster.hierarchy import dendrogram, ward
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
X = train.copy()
y = X.pop('yield')
seed = 42
splits = 5
k = KFold(n_splits=splits, random_state=seed, shuffle=True)
np.random.seed(seed)
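# Fixed-seed 5-fold CV so every model is scored on identical splits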
def cross_val_score(model, cv=k, label=''):
    X = train.copy()
    y = X.pop('yield')
    val_predictions = np.zeros(len(train))
    train_predictions = np.zeros(len(train))
    train_mae, val_mae = ([], [])
    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        model.fit(X.iloc[train_idx], y.iloc[train_idx])
        train_preds = model.predict(X.iloc[train_idx])
        val_preds = model.predict(X.iloc[val_idx])
        train_predictions[train_idx] += train_preds
        val_predictions[val_idx] += val_preds
        train_score = mean_absolute_error(y.iloc[train_idx], train_preds)
        val_score = mean_absolute_error(y.iloc[val_idx], val_preds)
        train_mae.append(train_score)
        val_mae.append(val_score)
    return val_mae
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
def cross_val_and_test(model, cv=k, label=''):
    X = train.copy()
    y = X.pop('yield')
    val_predictions = np.zeros(len(train))
    train_predictions = np.zeros(len(train))
    train_mae, val_mae = ([], [])
    test_predictions = np.zeros(len(test))
    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        model.fit(X.iloc[train_idx], y.iloc[train_idx])
        train_preds = model.predict(X.iloc[train_idx])
        val_preds = model.predict(X.iloc[val_idx])
        # accumulate this fold's test predictions; the sum is averaged over all folds on return
        test_predictions += model.predict(test)
        train_predictions[train_idx] += train_preds
        val_predictions[val_idx] += val_preds
        train_score = mean_absolute_error(y.iloc[train_idx], train_preds)
        val_score = mean_absolute_error(y.iloc[val_idx], val_preds)
        train_mae.append(train_score)
        val_mae.append(val_score)
    return test_predictions / splits
reg_models = [('lgb', LGBMRegressor(random_state=seed, objective='mae')), ('cb', CatBoostRegressor(random_state=seed, objective='MAE', verbose=0)), ('hgb', HistGradientBoostingRegressor(random_state=seed, loss='absolute_error'))]
prediction = np.zeros(len(test))
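# Sum each model's fold-averaged test predictions, then divide by the model count for a simple mean ensemble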
for label, model in reg_models:
    prediction += cross_val_and_test(model, label=label)
prediction /= len(reg_models)
test_1.drop(list(test_1.drop('id', axis=1)), axis=1, inplace=True)
test_1['yield'] = prediction
test_1.to_csv('submission.csv', index=False)
test_1.head() | code |
128009742/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, NMF
from sklearn.manifold import TSNE
from umap import UMAP
from scipy.cluster.hierarchy import dendrogram, ward
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100) | code |
128009742/cell_16 | [
"text_plain_output_1.png"
] | """for (label, model) in models:
mae_list[label] = cross_val_score(
Pipeline([('fe1', FE1()), (label, model)]),
label = label
)
""" | code |
128009742/cell_17 | [
"text_plain_output_1.png"
] | """plt.figure(figsize = (8, 4), dpi = 300)
sns.barplot(data = mae_list.reindex((mae_list).mean().sort_values().index, axis = 1), palette = 'viridis', orient = 'h')
plt.title('MAE Comparison', weight = 'bold', size = 20)
plt.show()""" | code |
128009742/cell_22 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, NMF
from sklearn.manifold import TSNE
from umap import UMAP
from scipy.cluster.hierarchy import dendrogram, ward
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
X = train.copy()
y = X.pop('yield')
seed = 42
splits = 5
k = KFold(n_splits=splits, random_state=seed, shuffle=True)
np.random.seed(seed)
def cross_val_score(model, cv=k, label=''):
    X = train.copy()
    y = X.pop('yield')
    val_predictions = np.zeros(len(train))
    train_predictions = np.zeros(len(train))
    train_mae, val_mae = ([], [])
    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        model.fit(X.iloc[train_idx], y.iloc[train_idx])
        train_preds = model.predict(X.iloc[train_idx])
        val_preds = model.predict(X.iloc[val_idx])
        train_predictions[train_idx] += train_preds
        val_predictions[val_idx] += val_preds
        train_score = mean_absolute_error(y.iloc[train_idx], train_preds)
        val_score = mean_absolute_error(y.iloc[val_idx], val_preds)
        train_mae.append(train_score)
        val_mae.append(val_score)
    return val_mae
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
def cross_val_and_test(model, cv=k, label=''):
    X = train.copy()
    y = X.pop('yield')
    val_predictions = np.zeros(len(train))
    train_predictions = np.zeros(len(train))
    train_mae, val_mae = ([], [])
    test_predictions = np.zeros(len(test))
    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        model.fit(X.iloc[train_idx], y.iloc[train_idx])
        train_preds = model.predict(X.iloc[train_idx])
        val_preds = model.predict(X.iloc[val_idx])
        test_predictions += model.predict(test)
        train_predictions[train_idx] += train_preds
        val_predictions[val_idx] += val_preds
        train_score = mean_absolute_error(y.iloc[train_idx], train_preds)
        val_score = mean_absolute_error(y.iloc[val_idx], val_preds)
        train_mae.append(train_score)
        val_mae.append(val_score)
    return test_predictions / splits
reg_models = [('lgb', LGBMRegressor(random_state=seed, objective='mae')), ('cb', CatBoostRegressor(random_state=seed, objective='MAE', verbose=0)), ('hgb', HistGradientBoostingRegressor(random_state=seed, loss='absolute_error'))]
prediction = np.zeros(len(test))
for label, model in reg_models:
    prediction += cross_val_and_test(model, label=label)
prediction /= len(reg_models)
test_1.drop(list(test_1.drop('id', axis=1)), axis=1, inplace=True) | code |
128009742/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold, KFold
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.metrics import mean_absolute_error, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, NMF
from sklearn.manifold import TSNE
from umap import UMAP
from scipy.cluster.hierarchy import dendrogram, ward
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
sns.set_theme(style='white', palette='viridis')
pal = sns.color_palette('viridis')
pd.set_option('display.max_rows', 100)
train = pd.read_csv('../input/playground-series-s3e14/train.csv')
test_1 = pd.read_csv('../input/playground-series-s3e14/test.csv')
orig_train = pd.read_csv('../input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
train.drop('id', axis=1, inplace=True)
test = test_1.drop('id', axis=1)
orig_train.drop('Row#', axis=1, inplace=True)
train.head(10) | code |
130015002/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df.info() | code |
130015002/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns | code |
130015002/cell_25 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df = train_df.drop(['AveRooms'], axis=1)
train_df = train_df.drop(['AveBedrms'], axis=1)
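# With single x/y vars, pairplot draws one scatter: Population vs. MedHouseVal, coloured by Population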
plt.figure(figsize=(12, 6))
sns.pairplot(train_df, x_vars=['Population'], y_vars=['MedHouseVal'], height=7, kind='scatter', hue='Population', palette='Greens_r') | code |
130015002/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df = train_df.drop(['AveRooms'], axis=1)
train_df = train_df.drop(['AveBedrms'], axis=1)
train_df.isnull().sum() | code |