path (string, length 13–17) | screenshot_names (sequence, length 1–873) | code (string, length 0–40.4k) | cell_type (string, 1 class) |
---|---|---|---|
16169565/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from skimage import io, color, exposure, transform
import cv2
import numpy as np  # linear algebra
import os
IMG_SIZE = 48  # assumed target side length; the original notebook defines this in a cell not captured here
def preprocess_img(img):
    # Histogram equalization on the V (brightness) channel
    hsv = color.rgb2hsv(img)
    hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2])
    img = color.hsv2rgb(hsv)
    # central crop to a square
    min_side = min(img.shape[:-1])
    centre = img.shape[0] // 2, img.shape[1] // 2
    img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,
              centre[1] - min_side // 2:centre[1] + min_side // 2,
              :]
    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    # roll color axis to axis 0 (channels-first)
    img = np.rollaxis(img, -1)
    return img
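# Quick sanity check of the preprocessing above (illustrative only, not part of the original
# notebook): the result is channels-first, i.e. shape (3, IMG_SIZE, IMG_SIZE).
#   dummy = np.random.rand(100, 80, 3)
#   assert preprocess_img(dummy).shape == (3, IMG_SIZE, IMG_SIZE)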
DATA_DIR_TRAIN = '../input/dataset/dataset/train'
CATEGORIES = ['left', 'right']
x_train = []
y_train = []
img_data_train = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TRAIN, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_train.append(img)
        new_img = preprocess_img(img)
        x_train.append(new_img)
        y_train.append(class_num)
DATA_DIR_TEST = '../input/dataset/dataset/test'
CATEGORIES = ['left', 'right']
x_test = []
y_test = []
img_data_test = []
for category in CATEGORIES:
    path = os.path.join(DATA_DIR_TEST, category)
    class_num = CATEGORIES.index(category)
    for img_path in os.listdir(path):
        img = cv2.imread(os.path.join(path, img_path))
        img_data_test.append(img)
        new_img = preprocess_img(img)
        x_test.append(new_img)
        y_test.append(class_num)
x_train_np = np.array(x_train, dtype='float32')
NUM_CLASSES = len(CATEGORIES)  # two classes ('left', 'right'); added here because the captured cell never defines it
y_train_np = np.eye(NUM_CLASSES, dtype='uint8')[y_train]
x_test_np = np.array(x_test, dtype='float32')
y_test_np = np.eye(NUM_CLASSES, dtype='uint8')[y_test]
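# The np.eye indexing above is a compact one-hot encoding: row i of the identity matrix is
# the one-hot vector for class i, so np.eye(2, dtype='uint8')[[0, 1, 1]] gives
# [[1, 0], [0, 1], [0, 1]].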
print(x_train_np.shape)
print(y_train_np.shape)
print(x_test_np.shape)
print(y_test_np.shape) | code |
17112602/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
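# get_embeddings builds a fresh TF1 graph per call and returns a NumPy array of shape
# (len(text), EMBEDDING_DIM). The training texts are embedded in two halves below, presumably
# to limit peak memory; the same idea in general form (a sketch, not from the original) is
#   np.concatenate([get_embeddings(part) for part in np.array_split(train_text, n_chunks)])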
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
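# Pipeline recap: project the 512-d sentence embeddings onto the first 50 principal
# components, expand them with degree-2 polynomial features (~1,300 features), then fit an
# L2-regularised logistic regression; the small C = 0.08 means fairly strong regularisation.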
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5) | code |
17112602/cell_9 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape) | code |
17112602/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
y_test_pred = model.predict_proba(X_test)[:, 1]
f1_score(y_test, y_test_pred > 0.5)
fpr, tpr, _ = roc_curve(y_test, y_test_pred)
roc_auc = auc(fpr, tpr)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
df_acc = pd.DataFrame(columns=['minprob', 'maxprob', 'count', 'accuracy'])
for pbot in np.linspace(0, 0.9, 10):
    ptop = pbot + 0.1
    mask = (y_test_pred >= pbot) & (y_test_pred < ptop)
    count = int(mask.sum())
    actual = pd.Series(y_test)[mask].values
    pred_prob = pd.Series(y_test_pred)[mask].values
    pred_bin = pred_prob > 0.5
    acc = accuracy_score(actual, pred_bin)
    row = pd.DataFrame({'minprob': [pbot], 'maxprob': [ptop], 'count': [count], 'accuracy': [acc]})
    df_acc = pd.concat([df_acc, row])
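# The loop above is a simple calibration check: it buckets the test predictions into ten
# probability bands of width 0.1 and records each band's size and accuracy, showing whether
# confident predictions (near 0 or 1) are actually correct more often.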
df_acc.set_index(['minprob', 'maxprob']) | code |
17112602/cell_6 | [
"image_output_1.png"
] | !ls $basepath | code |
17112602/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
import os
print(os.listdir('../input')) | code |
17112602/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
pca = PCA(64)
pca.fit(X_train)
print(pca.explained_variance_ratio_) | code |
17112602/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
y_test_pred = model.predict_proba(X_test)[:, 1]
f1_score(y_test, y_test_pred > 0.5)
fpr, tpr, _ = roc_curve(y_test, y_test_pred)
roc_auc = auc(fpr, tpr)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
pd.Series(y_test_pred).hist()
plt.show() | code |
17112602/cell_7 | [
"image_output_1.png"
] | import pandas as pd
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_train.head() | code |
17112602/cell_18 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
y_test_pred = model.predict_proba(X_test)[:, 1]
f1_score(y_test, y_test_pred > 0.5)
fpr, tpr, _ = roc_curve(y_test, y_test_pred)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic curve')
plt.legend(loc='lower right')
plt.show() | code |
17112602/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
accuracy_score(y_valid, y_pred > 0.5) | code |
17112602/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
y_test_pred = model.predict_proba(X_test)[:, 1]
print(confusion_matrix(y_test, y_test_pred > 0.5))
f1_score(y_test, y_test_pred > 0.5) | code |
17112602/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
f1_score(y_valid, y_pred > 0.5)
y_test_pred = model.predict_proba(X_test)[:, 1]
f1_score(y_test, y_test_pred > 0.5)
accuracy_score(y_test, y_test_pred > 0.5) | code |
17112602/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train)
y_train_pred = model.predict_proba(X_train)[:, 1]
f1_score(y_train, y_train_pred > 0.5)
y_pred = model.predict_proba(X_valid)[:, 1]
print(confusion_matrix(y_valid, y_pred > 0.5))
f1_score(y_valid, y_pred > 0.5) | code |
17112602/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
(X_train[:5, :5], X_valid[:5, :5], X_test[:5, :5]) | code |
17112602/cell_12 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
TRAIN_INPUT = 'twitgen_train_201906011956.csv'
VALID_INPUT = 'twitgen_valid_201906011956.csv'
TEST_INPUT = 'twitgen_test_201906011956.csv'
EMBEDDING_DIM = 512
MAXLEN = 50
basepath = '/kaggle/input/'
df_train = pd.read_csv(basepath + TRAIN_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_valid = pd.read_csv(basepath + VALID_INPUT, index_col=['id', 'time'], parse_dates=['time'])
df_test = pd.read_csv(basepath + TEST_INPUT, index_col=['id', 'time'], parse_dates=['time'])
def prepare_data(df):
    text = df['text'].tolist()
    text = [' '.join(t.split()[0:MAXLEN]) for t in text]
    text = np.array(text, dtype=object)
    label = df['male'].tolist()
    return (text, label)
train_text, y_train = prepare_data(df_train)
valid_text, y_valid = prepare_data(df_valid)
test_text, y_test = prepare_data(df_test)
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/3'
def get_embeddings(text):
    with tf.Graph().as_default():
        embed = hub.Module(module_url)
        embeddings = embed(text)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            return sess.run(embeddings)
chunk = int(len(train_text) / 2)
X_train = np.concatenate([get_embeddings(train_text[:chunk]), get_embeddings(train_text[chunk:])])
X_valid = get_embeddings(valid_text)
X_test = get_embeddings(test_text)
(X_train.shape, X_valid.shape, X_test.shape)
model = Pipeline([('pca', PCA(50)), ('poly', PolynomialFeatures()), ('lr', LogisticRegression(C=0.08))])
model = model.fit(X_train, y_train) | code |
90146477/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum() | code |
90146477/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
cm = df.corr()
plt.figure(figsize=(16, 10))
sns.heatmap(cm, annot=True, cmap='Blues') | code |
90146477/cell_23 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
for col, sk in zip(numerical_cols, skewness_values):
    plt.figure(figsize=(14, 6))
    sns.histplot(data=df, x=col, bins=10, kde=True)
    plt.title(f'Distribution of {col}\nskewness:{sk:.2f}', fontsize=20) | code |
90146477/cell_44 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold,train_test_split,cross_val_score,RandomizedSearchCV,GridSearchCV
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
for col, sk in zip(numerical_cols, skewness_values):
    if abs(sk) >= 0.4:
        df[col] = np.log1p(df[col])
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
cm = df.corr()
df['Class'] = np.where(df['Class'] == 'Çerçevelik', 1, 0)
X = df[numerical_cols]
y = df['Class']
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=1)
model_0 = RandomForestClassifier(n_jobs=-1)
model_0_result = cross_val_score(model_0, X, y, scoring='accuracy', cv=cv, n_jobs=-1).mean()
model_0_result
model_1 = RandomForestClassifier(n_jobs=-1)
grid = {'n_estimators': np.arange(250, 501, 50), 'max_depth': np.arange(5, 16, 1), 'min_impurity_decrease': np.logspace(-5, -1, 30), 'max_features': ['auto', 0.6, 0.7, 0.8, 0.9], 'max_samples': np.arange(0.5, 1, 0.1), 'min_samples_split': np.arange(5, 16, 1), 'min_samples_leaf': np.arange(2, 16, 1)}
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=1)
rs_model_1 = RandomizedSearchCV(model_1, param_distributions=grid, cv=cv, n_iter=50, verbose=1, n_jobs=-1, scoring='accuracy')
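# Randomised search samples 50 hyper-parameter combinations from the grid above and scores
# each with 10-fold stratified cross-validation; after fitting, rs_model_1.best_params_ and
# rs_model_1.best_score_ hold the winning configuration (standard scikit-learn attributes).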
rs_model_1.fit(X, y) | code |
90146477/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.head() | code |
90146477/cell_29 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
df.head() | code |
90146477/cell_41 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold,train_test_split,cross_val_score,RandomizedSearchCV,GridSearchCV
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
cm = df.corr()
X = df[numerical_cols]
y = df['Class']
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=1)
model_0 = RandomForestClassifier(n_jobs=-1)
model_0_result = cross_val_score(model_0, X, y, scoring='accuracy', cv=cv, n_jobs=-1).mean()
model_0_result | code |
90146477/cell_2 | [
"text_plain_output_1.png"
] | !pip install openpyxl --quiet | code |
90146477/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
df.info() | code |
90146477/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewed_cols = []
for col, sk in zip(numerical_cols, skewness_values):
    if abs(sk) >= 0.4:
        skewed_cols.append(col)
print(f'There are {len(skewed_cols)} columns and they are:\n{skewed_cols}') | code |
90146477/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
90146477/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
for col, sk in zip(numerical_cols, skewness_values):
    plt.figure(figsize=(14, 6))
    sns.histplot(data=df, x=col, bins=10, kde=True)
    plt.title(f'Distribution of {col}\nskewness:{sk:.2f}', fontsize=20) | code |
90146477/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape | code |
90146477/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4] | code |
90146477/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3] | code |
90146477/cell_31 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
for col in numerical_cols:
    plt.figure(figsize=(14, 6))
    sns.violinplot(data=df, x='Class', y=col)
    plt.title(f'Distribution of Class with respect to {col}') | code |
90146477/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
still_skewed_cols = []
for col, sk in zip(numerical_cols, skewness_values):
    if abs(sk) >= 0.4:
        still_skewed_cols.append(col)
print(f'There are {len(still_skewed_cols)} columns and they are:\n{still_skewed_cols}') | code |
90146477/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3] | code |
90146477/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum() | code |
90146477/cell_27 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset.xlsx')
df.shape
df.duplicated().sum()
df.isna().sum()
numerical_cols = df.select_dtypes(include=np.number).columns.to_list()
numerical_cols[:4]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
skewness_values = df[numerical_cols].skew().to_list()
skewness_values[:3]
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='Class')
plt.title('Distribution of the Target Class', fontsize=20) | code |
89132848/cell_9 | [
"text_plain_output_1.png"
] | import json
import torch
test_dir = '../input/birdclef-2022/test_soundscapes'
test_base_path = '../input/birdclef-2022/test.csv'
class_dict_base_path = '../input/birdclef-2022-saved-weights-and-misc/class_dict.json'
best_acc_mode_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_accuracy_model.pt'
best_loss_model_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_loss_model.pt'
class_labels = json.load(open(class_dict_base_path, 'r'))
num_classes = len(class_labels.keys())
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device
best_model = CLEFNetwork(num_classes)
checkpoint = torch.load(best_loss_model_base_path, map_location=torch.device(device))
best_model.load_state_dict(checkpoint['model'])
print(best_model) | code |
89132848/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
test_dir = '../input/birdclef-2022/test_soundscapes'
test_base_path = '../input/birdclef-2022/test.csv'
class_dict_base_path = '../input/birdclef-2022-saved-weights-and-misc/class_dict.json'
best_acc_mode_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_accuracy_model.pt'
best_loss_model_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_loss_model.pt'
test_df = pd.read_csv(test_base_path)
test_df.head() | code |
89132848/cell_6 | [
"text_plain_output_1.png"
] | import torch
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device | code |
89132848/cell_11 | [
"text_html_output_1.png"
] | from torchaudio.transforms import MelSpectrogram
from torchvision.transforms import Resize
augm = [MelSpectrogram(n_mels=128), Resize((128, 128))]
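# The two transforms above turn a raw waveform into a fixed-size "image": MelSpectrogram
# produces a (n_mels=128, time) mel-scaled spectrogram, and Resize squashes it to 128x128 so
# it can be fed to the CLEFNetwork loaded earlier.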
augm | code |
89132848/cell_5 | [
"text_plain_output_1.png"
] | import json
test_dir = '../input/birdclef-2022/test_soundscapes'
test_base_path = '../input/birdclef-2022/test.csv'
class_dict_base_path = '../input/birdclef-2022-saved-weights-and-misc/class_dict.json'
best_acc_mode_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_accuracy_model.pt'
best_loss_model_base_path = '../input/birdclef-2022-saved-weights-and-misc/birdclef2022-best_loss_model.pt'
class_labels = json.load(open(class_dict_base_path, 'r'))
num_classes = len(class_labels.keys())
print('Number of class : {}'.format(num_classes)) | code |
105179048/cell_42 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import mplfinance as mpf
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.dtypes
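# Note: mplfinance's candlestick plot below expects a DatetimeIndex and numeric OHLCV columns;
# the conversion step appears to live in a cell not captured here. A minimal sketch of it:
#   hist_df['Close Time'] = pd.to_datetime(hist_df['Close Time'], unit='ms')
#   hist_df[['Open', 'High', 'Low', 'Close', 'Volume']] = hist_df[['Open', 'High', 'Low', 'Close', 'Volume']].astype(float)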
hist_df.set_index('Close Time').tail(100)
mpf.plot(hist_df.set_index('Close Time').tail(120), type='candle', style='charles', volume=True, title='ETHBTC Last 120 Days', mav=(10, 20, 30)) | code |
105179048/cell_13 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
ticker_df.tail() | code |
105179048/cell_9 | [
"image_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
tickers | code |
105179048/cell_30 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes | code |
105179048/cell_20 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
depth_df.dtypes | code |
105179048/cell_29 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape | code |
105179048/cell_26 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.head() | code |
105179048/cell_41 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.dtypes
hist_df.set_index('Close Time').tail(100) | code |
105179048/cell_19 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
depth_df.head() | code |
105179048/cell_18 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
depth = client.get_order_book(symbol='BTCUSDT')
depth | code |
105179048/cell_28 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.tail() | code |
105179048/cell_15 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
ticker_df.set_index('symbol', inplace=True)
float(ticker_df.loc['ETHBTC']['price']) | code |
105179048/cell_38 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.dtypes
hist_df.info() | code |
105179048/cell_3 | [
"text_html_output_1.png"
] | !pip install python-binance pandas mplfinance | code |
105179048/cell_35 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.tail() | code |
105179048/cell_24 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
depth = client.get_order_book(symbol='BTCUSDT')
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
historical | code |
105179048/cell_10 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
tickers[1]['price'] | code |
105179048/cell_37 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.dtypes
hist_df.describe() | code |
105179048/cell_12 | [
"text_plain_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
ticker_df.head() | code |
105179048/cell_36 | [
"text_html_output_1.png"
] | from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
apikey = 'YOURAPIKEY'
secret = 'YOURAPISECRET'
client = Client(apikey, secret)
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['asks'])
depth_df.columns = ['Price', 'Volume']
historical = client.get_historical_klines('ETHBTC', Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']
hist_df.shape
hist_df.dtypes
hist_df.dtypes | code |
128027868/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_test.columns = ['label', 'title', 'text']
df_test.head() | code |
128027868/cell_30 | [
"text_html_output_1.png"
] | from nltk.stem import WordNetLemmatizer, SnowballStemmer
import collections
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
    df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
    df.drop(col2, axis=1, inplace=True)
    return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
def clean_text(text):
    text = re.sub('[^A-Za-zÀ-ú ]+', '', text)
    text = re.sub('book|one', '', text)
    text = text.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = re.sub('\\s+', ' ', text).strip()
    return text
df_train['text'] = df_train['text'].apply(clean_text)
df_test['text'] = df_test['text'].apply(clean_text)
def normalize_text(text):
    stemmer = SnowballStemmer('english')
    normalized_text = []
    for word in text.split():
        stemmed_word = stemmer.stem(word)
        normalized_text.append(stemmed_word)
    return ' '.join(normalized_text)
df_train['text'] = df_train['text'].apply(normalize_text)
df_test['text'] = df_test['text'].apply(normalize_text)
words = []
for text in df_train['text']:
    words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.xticks(range(len(top_words)), list(top_words.keys()))
words = []
for text in df_test['text']:
    words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.figure(figsize=(10, 6))
plt.bar(range(len(top_words)), list(top_words.values()), align='center')
plt.xticks(range(len(top_words)), list(top_words.keys()))
plt.grid(alpha=0.5)
plt.title('Top 10 most used words', fontsize=18)
plt.xlabel('Words')
plt.ylabel('Frequency') | code |
128027868/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.info() | code |
128027868/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from tensorflow.keras.layers import Embedding, Dense, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd
import tensorflow as tf
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
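# Tokenize with a 10,000-word vocabulary and pad/truncate every review to 200 tokens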
max_words = 10000
max_len = 200
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df_train['text'])
sequences_train = tokenizer.texts_to_sequences(df_train['text'])
sequences_val = tokenizer.texts_to_sequences(df_test['text'])
word_index = tokenizer.word_index
data_train = pad_sequences(sequences_train, maxlen=max_len)
data_val = pad_sequences(sequences_val, maxlen=max_len)
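# Minimal baseline: 16-dimensional embeddings averaged over the sequence, then one sigmoid unit for binary sentiment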
model = tf.keras.Sequential()
model.add(Embedding(max_words, 16, input_length=max_len))
model.add(GlobalAveragePooling1D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(data_train, df_train['label'], epochs=15, batch_size=64, validation_data=(data_val, df_test['label']))
loss, accuracy = model.evaluate(data_val, df_test['label'], verbose=0)
print('Accuracy: %f' % (accuracy * 100)) | code |
128027868/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.stem import WordNetLemmatizer, SnowballStemmer
from tensorflow.keras.layers import Embedding, Dense, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import collections
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
import tensorflow as tf
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
def clean_text(text):
text = re.sub('[^A-Za-zÀ-ú ]+', '', text)
text = re.sub('book|one', '', text)
text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation))
text = re.sub('\\s+', ' ', text).strip()
return text
df_train['text'] = df_train['text'].apply(clean_text)
df_test['text'] = df_test['text'].apply(clean_text)
def normalize_text(text):
stemmer = SnowballStemmer('english')
normalized_text = []
for word in text.split():
stemmed_word = stemmer.stem(word)
normalized_text.append(stemmed_word)
return ' '.join(normalized_text)
df_train['text'] = df_train['text'].apply(normalize_text)
df_test['text'] = df_test['text'].apply(normalize_text)
words = []
for text in df_train['text']:
words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.xticks(range(len(top_words)), list(top_words.keys()))
words = []
for text in df_test['text']:
words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.xticks(range(len(top_words)), list(top_words.keys()))
max_words = 10000
max_len = 200
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df_train['text'])
sequences_train = tokenizer.texts_to_sequences(df_train['text'])
sequences_val = tokenizer.texts_to_sequences(df_test['text'])
word_index = tokenizer.word_index
data_train = pad_sequences(sequences_train, maxlen=max_len)
data_val = pad_sequences(sequences_val, maxlen=max_len)
model = tf.keras.Sequential()
model.add(Embedding(max_words, 16, input_length=max_len))
model.add(GlobalAveragePooling1D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(data_train, df_train['label'], epochs=15, batch_size=64, validation_data=(data_val, df_test['label']))
plt.style.use('dark_background')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.show() | code |
128027868/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
import string
import collections
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer, SnowballStemmer
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences | code |
128027868/cell_28 | [
"text_html_output_1.png"
] | from nltk.stem import WordNetLemmatizer, SnowballStemmer
import collections
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
def clean_text(text):
text = re.sub('[^A-Za-zÀ-ú ]+', '', text)
text = re.sub('book|one', '', text)
text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation))
text = re.sub('\\s+', ' ', text).strip()
return text
df_train['text'] = df_train['text'].apply(clean_text)
df_test['text'] = df_test['text'].apply(clean_text)
def normalize_text(text):
stemmer = SnowballStemmer('english')
normalized_text = []
for word in text.split():
stemmed_word = stemmer.stem(word)
normalized_text.append(stemmed_word)
return ' '.join(normalized_text)
df_train['text'] = df_train['text'].apply(normalize_text)
df_test['text'] = df_test['text'].apply(normalize_text)
words = []
for text in df_train['text']:
words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.figure(figsize=(10, 6))
plt.bar(range(len(top_words)), list(top_words.values()), align='center')
plt.xticks(range(len(top_words)), list(top_words.keys()))
plt.grid(alpha=0.5)
plt.title('Top 10 most used words', fontsize=18)
plt.xlabel('Words')
plt.ylabel('Frequency') | code |
128027868/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_train.head() | code |
128027868/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
df_train.head() | code |
128027868/cell_35 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers import Embedding, Dense, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd
import tensorflow as tf
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
max_words = 10000
max_len = 200
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df_train['text'])
sequences_train = tokenizer.texts_to_sequences(df_train['text'])
sequences_val = tokenizer.texts_to_sequences(df_test['text'])
word_index = tokenizer.word_index
model = tf.keras.Sequential()
model.add(Embedding(max_words, 16, input_length=max_len))
model.add(GlobalAveragePooling1D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary() | code |
128027868/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
print(df_train['label'].value_counts())
print(df_test['label'].value_counts()) | code |
128027868/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.head() | code |
128027868/cell_36 | [
"text_html_output_1.png"
] | from tensorflow.keras.layers import Embedding, Dense, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd
import tensorflow as tf
df_train = pd.read_csv('/kaggle/input/amazon-reviews/train.csv')
df_test = pd.read_csv('/kaggle/input/amazon-reviews/test.csv')
df_train.columns = ['label', 'title', 'text']
df_test.columns = ['label', 'title', 'text']
df_train = df_train.head(100000)
df_test = df_test.head(10000)
def concat_columns(df, col1, col2, new_col):
df[new_col] = df[col1].apply(str) + ' ' + df[col2].apply(str)
df.drop(col2, axis=1, inplace=True)
return df
df_train = concat_columns(df_train, 'text', 'title', 'text')
df_test = concat_columns(df_test, 'text', 'title', 'text')
max_words = 10000
max_len = 200
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df_train['text'])
sequences_train = tokenizer.texts_to_sequences(df_train['text'])
sequences_val = tokenizer.texts_to_sequences(df_test['text'])
word_index = tokenizer.word_index
data_train = pad_sequences(sequences_train, maxlen=max_len)
data_val = pad_sequences(sequences_val, maxlen=max_len)
model = tf.keras.Sequential()
model.add(Embedding(max_words, 16, input_length=max_len))
model.add(GlobalAveragePooling1D())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(data_train, df_train['label'], epochs=15, batch_size=64, validation_data=(data_val, df_test['label'])) | code |
327932/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.feature_selection import RFECV
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.svm import SVR
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def transform(self, X, *_):
if isinstance(X, pd.DataFrame):
return pd.DataFrame(X[self.columns])
else:
raise TypeError('This transformer only works with Pandas Dataframes')
def fit(self, X, *_):
return self
# The training frame is read in the notebook's first cell; repeat it here so this cell runs on its own
df = pd.read_csv('../input/train.csv')
cs = ColumnSelector('Age')
cs.transform(df).head()
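# Age pipeline: select the column, mean-impute missing values, then standardize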
age_pipe = make_pipeline(ColumnSelector('Age'), Imputer(), StandardScaler())
df.Embarked = df.Embarked.fillna('S')
class GetDummiesTransformer(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def transform(self, X, *_):
if isinstance(X, pd.DataFrame):
return pd.get_dummies(X[self.columns], columns=self.columns)
else:
raise TypeError('This transformer only works with Pandas Dataframes')
def fit(self, X, *_):
return self
one_hot_pipe = GetDummiesTransformer(['Pclass', 'Embarked'])
class TrueFalseTransformer(BaseEstimator, TransformerMixin):
def __init__(self, flag):
self.flag = flag
def transform(self, X, *_):
return X == self.flag
def fit(self, X, *_):
return self
gender_pipe = make_pipeline(ColumnSelector('Sex'), TrueFalseTransformer('male'))
fare_pipe = make_pipeline(ColumnSelector('Fare'), StandardScaler())
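# Stack the age, one-hot, gender and fare transformers into a single feature union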
union = make_union(age_pipe, one_hot_pipe, gender_pipe, fare_pipe)
X = df[[u'Pclass', u'Sex', u'Age', u'SibSp', u'Parch', u'Fare', u'Embarked']]
X_1 = union.fit_transform(X)
new_cols = ['scaled_age', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'male', 'scaled_fare']
Xt = pd.DataFrame(X_1, columns=new_cols)
Xt = pd.concat([Xt, X[[u'SibSp', u'Parch']]], axis=1)
X = Xt
y = df[u'Survived']
from sklearn.feature_selection import RFECV
from sklearn.svm import SVR
estimator = SVR(kernel='linear')
selector = RFECV(estimator, step=1, cv=3)
# Fit RFECV once and read the selected-feature mask; the transformed array itself is not needed
selector.fit(X, y)
rfecv_columns = Xt.columns[selector.support_]
X = X[rfecv_columns]
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.grid_search import GridSearchCV
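# Grid-search logistic regression over penalty type and regularization strength C with 5-fold CV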
logreg_parameters = {'penalty': ['l1', 'l2'], 'C': np.logspace(-5, 1, 50), 'solver': ['liblinear']}
lr = LogisticRegression(solver='liblinear')
mdl = lr.fit(X_train, y_train)
gs = GridSearchCV(lr, logreg_parameters, cv=5)
gs.fit(X_train, y_train)
predictions = gs.predict(X) | code |
327932/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from patsy import dmatrices
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn import metrics
df = pd.read_csv('../input/train.csv') | code |
50228414/cell_13 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from scipy.stats import mode
import matplotlib.pyplot as plt
import seaborn as sns
df_metabric = pd.read_csv('../input/breast-cancer-metabric/Breast Cancer METABRIC.csv')
fig, axes = plt.subplots(figsize=(32, 16), ncols=2, nrows=2)
sns.distplot(df_metabric['Age at Diagnosis'], ax=axes[0][0])
sns.countplot(df_metabric['Type of Breast Surgery'], ax=axes[0][1])
sns.countplot(df_metabric['Cancer Type'], ax=axes[1][0])
sns.countplot(df_metabric['Cancer Type Detailed'], ax=axes[1][1])
for i in range(2):
for j in range(2):
if i == 1 and j == 1:
axes[i][j].tick_params(axis='x', labelsize=22, rotation=90)
else:
axes[i][j].tick_params(axis='x', labelsize=22)
axes[i][j].tick_params(axis='y', labelsize=20)
axes[i][j].set_xlabel('')
axes[i][j].set_ylabel('')
axes[0][0].set_title('Age at Diagnosis Distribution', size=22, pad=15)
axes[0][1].set_title('Type of Breast Surgery Distribution', size=22, pad=15)
axes[1][0].set_title('Cancer Type Distribution', size=22, pad=15)
axes[1][1].set_title('Cancer Type Detailed Distribution', size=22, pad=15)
plt.show()
fig, axes = plt.subplots(figsize=(32, 16), ncols=2, nrows=2)
sns.distplot(df_metabric['Overall Survival (Months)'], ax=axes[0][0])
sns.countplot(df_metabric['Overall Survival Status'], ax=axes[0][1])
sns.distplot(df_metabric['Relapse Free Status (Months)'], ax=axes[1][0])
sns.countplot(df_metabric['Relapse Free Status'], ax=axes[1][1])
for i in range(2):
for j in range(2):
if i == 1 and j == 1:
axes[i][j].tick_params(axis='x', labelsize=22)
else:
axes[i][j].tick_params(axis='x', labelsize=22)
axes[i][j].tick_params(axis='y', labelsize=22)
axes[i][j].set_xlabel('')
axes[i][j].set_ylabel('')
axes[0][0].set_title('Overall Survival (Months) Distribution', size=22, pad=15)
axes[0][1].set_title('Overall Survival Status Distribution', size=22, pad=15)
axes[1][0].set_title('Relapse Free Status (Months) Distribution', size=22, pad=15)
axes[1][1].set_title('Relapse Free Status Distribution', size=22, pad=15)
plt.show()
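# Horizontal bar chart of missing-value counts per column, most incomplete first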
fig = plt.figure(figsize=(24, 16))
sns.barplot(x=df_metabric.isnull().sum().sort_values(ascending=False).values, y=df_metabric.isnull().sum().sort_values(ascending=False).index, palette='Reds_d')
plt.xlabel('Number of Missing Rows', size=20, labelpad=15)
plt.ylabel('')
plt.tick_params(axis='x', labelsize=18)
plt.tick_params(axis='y', labelsize=18)
plt.title('Missing Values in Columns', size=20, pad=15)
plt.show() | code |
50228414/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from scipy.stats import mode
import matplotlib.pyplot as plt
import seaborn as sns
df_metabric = pd.read_csv('../input/breast-cancer-metabric/Breast Cancer METABRIC.csv')
fig, axes = plt.subplots(figsize=(32, 16), ncols=2, nrows=2)
sns.distplot(df_metabric['Age at Diagnosis'], ax=axes[0][0])
sns.countplot(df_metabric['Type of Breast Surgery'], ax=axes[0][1])
sns.countplot(df_metabric['Cancer Type'], ax=axes[1][0])
sns.countplot(df_metabric['Cancer Type Detailed'], ax=axes[1][1])
for i in range(2):
for j in range(2):
if i == 1 and j == 1:
axes[i][j].tick_params(axis='x', labelsize=22, rotation=90)
else:
axes[i][j].tick_params(axis='x', labelsize=22)
axes[i][j].tick_params(axis='y', labelsize=20)
axes[i][j].set_xlabel('')
axes[i][j].set_ylabel('')
axes[0][0].set_title('Age at Diagnosis Distribution', size=22, pad=15)
axes[0][1].set_title('Type of Breast Surgery Distribution', size=22, pad=15)
axes[1][0].set_title('Cancer Type Distribution', size=22, pad=15)
axes[1][1].set_title('Cancer Type Detailed Distribution', size=22, pad=15)
plt.show()
fig, axes = plt.subplots(figsize=(32, 16), ncols=2, nrows=2)
sns.distplot(df_metabric['Overall Survival (Months)'], ax=axes[0][0])
sns.countplot(df_metabric['Overall Survival Status'], ax=axes[0][1])
sns.distplot(df_metabric['Relapse Free Status (Months)'], ax=axes[1][0])
sns.countplot(df_metabric['Relapse Free Status'], ax=axes[1][1])
for i in range(2):
for j in range(2):
if i == 1 and j == 1:
axes[i][j].tick_params(axis='x', labelsize=22)
else:
axes[i][j].tick_params(axis='x', labelsize=22)
axes[i][j].tick_params(axis='y', labelsize=22)
axes[i][j].set_xlabel('')
axes[i][j].set_ylabel('')
axes[0][0].set_title('Overall Survival (Months) Distribution', size=22, pad=15)
axes[0][1].set_title('Overall Survival Status Distribution', size=22, pad=15)
axes[1][0].set_title('Relapse Free Status (Months) Distribution', size=22, pad=15)
axes[1][1].set_title('Relapse Free Status Distribution', size=22, pad=15)
plt.show() | code |
50228414/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from scipy.stats import mode
import matplotlib.pyplot as plt
import seaborn as sns
df_metabric = pd.read_csv('../input/breast-cancer-metabric/Breast Cancer METABRIC.csv')
df_metabric.head() | code |
50228414/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from scipy.stats import mode
import matplotlib.pyplot as plt
import seaborn as sns
df_metabric = pd.read_csv('../input/breast-cancer-metabric/Breast Cancer METABRIC.csv')
fig, axes = plt.subplots(figsize=(32, 16), ncols=2, nrows=2)
sns.distplot(df_metabric['Age at Diagnosis'], ax=axes[0][0])
sns.countplot(df_metabric['Type of Breast Surgery'], ax=axes[0][1])
sns.countplot(df_metabric['Cancer Type'], ax=axes[1][0])
sns.countplot(df_metabric['Cancer Type Detailed'], ax=axes[1][1])
for i in range(2):
for j in range(2):
if i == 1 and j == 1:
axes[i][j].tick_params(axis='x', labelsize=22, rotation=90)
else:
axes[i][j].tick_params(axis='x', labelsize=22)
axes[i][j].tick_params(axis='y', labelsize=20)
axes[i][j].set_xlabel('')
axes[i][j].set_ylabel('')
axes[0][0].set_title('Age at Diagnosis Distribution', size=22, pad=15)
axes[0][1].set_title('Type of Breast Surgery Distribution', size=22, pad=15)
axes[1][0].set_title('Cancer Type Distribution', size=22, pad=15)
axes[1][1].set_title('Cancer Type Detailed Distribution', size=22, pad=15)
plt.show() | code |
50228414/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from scipy.stats import mode
import matplotlib.pyplot as plt
import seaborn as sns
df_metabric = pd.read_csv('../input/breast-cancer-metabric/Breast Cancer METABRIC.csv')
print(f'METABRIC Dataset Shape: {df_metabric.shape}')
print(f'METABRIC Dataset Memory Usage: {df_metabric.memory_usage().sum() / 1024 ** 2:.2f} MB') | code |
106196473/cell_4 | [
"text_plain_output_1.png"
] | import gc
import numpy as np # linear algebra
import pandas as pd
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
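# For each key column, compute per-group winBid statistics on the training set and merge them onto the given frame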
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
df = agg_functions(df)
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
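# Convert the 'WxH' creative size string into a scaled area feature (width * height * 1e-4)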
df['size'] = df['size'].apply(lambda x: x.split('x'))
df['size'] = df['size'].apply(lambda x: int(x[0]) * int(x[1]))
df['size'] = df['size'] * 0.0001
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: x.replace('-beta', '.') if 'beta' in x else x + '.0')
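# Zero-pad each version component to a fixed width and join the pieces into one comparable integer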
def ver_convert_num(ver):
ver = ver.str.split('.', expand=True)
digit = [2, 1, 1, 2]
for i, x in enumerate(ver.columns):
if int(ver[x].str.len().min()) == digit[i]:
continue
ver[x] = ver[x].apply(lambda x: x if len(x) == digit[i] else (digit[i] - len(x)) * '0' + x)
ver = ver.apply(lambda x: int(''.join(x)), axis=1)
return ver
df.mediationProviderVersion = ver_convert_num(df.mediationProviderVersion)
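# Known mediation SDK builds mapped to ordinal ranks; unseen builds get fractional ranks interpolated between their neighbours below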
rank = {102100: 0, 103100: 1, 103200: 2, 103400: 3, 103500: 4, 103600: 5, 103700: 6, 110000: 7, 110003: 8, 110004: 9, 110005: 10, 110007: 11, 110101: 12, 110106: 13, 110110: 14, 111000: 15, 111100: 16, 111102: 17, 111103: 18, 111104: 19, 111200: 20, 111305: 21, 112003: 22, 112100: 23, 113100: 24, 113200: 25, 113201: 26, 113300: 27, 114000: 28, 114100: 29, 114200: 30, 114201: 31, 114300: 32, 114304: 33, 114400: 34}
unranked = []
for x in df.mediationProviderVersion.unique():
if x not in rank.keys():
unranked.append(x)
t = list(rank.keys())
tem = [i for i in unranked if i < t[0]]
def interver(tem, border, index):
global rank
tem.sort()
if len(tem) > 0:
tem.append(border)
for i, c in enumerate(tem):
if c == border:
continue
rank[c] = index - (i + 1) / len(tem)
interver(tem, t[0], 0)
for x in range(0, len(t), 2):
if x + 1 == len(t):
continue
tem = [i for i in unranked if i < t[x + 1] and i > t[x]]
interver(tem, t[x + 1], x + 1)
tem = [i for i in unranked if i < t[x] and i > t[x - 1]]
interver(tem, t[x], x)
tem = [i for i in unranked if i > t[-1]]
tem.sort()
if len(tem) > 0:
h = rank[t[-1]] + 1
for i, c in enumerate(tem):
if i + 1 == len(tem):
rank[c] = h
continue
rank[c] = h - (i + 1) / len(tem)
print(rank)
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: rank[x])
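# Split osAndVersion into platform and major version, then rank each major version per platform with a platform-specific offset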
df['os'] = df.osAndVersion.apply(lambda x: x.split('-')[0])
df['ver'] = df.osAndVersion.apply(lambda x: x.split('-')[1].split('.')[0])
os_rank = [{4: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 10: 6, 11: 7}, {10: 0, 11: 1, 12: 2, 13: 3, 14: 4, 15: 5, 16: 6}]
threshold = [3, 1]
for i, c in enumerate(df.os.unique()):
rank = os_rank[i]
for x in df.loc[df.os == c, 'ver'].unique():
if int(x) not in rank.keys():
print(x)
g = rank.keys()
x = int(x)
rank[x] = min(g) - x if x < min(g) else rank[max(g)] + (x - max(g))
print(rank)
df.loc[df.os == c, 'ver'] = df.loc[df.os == c, 'ver'].apply(lambda y: rank[int(y)] + threshold[i])
del df['os']
df['ver'] = df['ver'].astype(np.float32)
import gc
gc.collect()
df | code |
106196473/cell_6 | [
"text_html_output_1.png"
] | import gc
import numpy as np # linear algebra
import pandas as pd
import pycountry
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
df = agg_functions(df)
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
df['size'] = df['size'].apply(lambda x: x.split('x'))
df['size'] = df['size'].apply(lambda x: int(x[0]) * int(x[1]))
df['size'] = df['size'] * 0.0001
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: x.replace('-beta', '.') if 'beta' in x else x + '.0')
def ver_convert_num(ver):
ver = ver.str.split('.', expand=True)
digit = [2, 1, 1, 2]
for i, x in enumerate(ver.columns):
if int(ver[x].str.len().min()) == digit[i]:
continue
ver[x] = ver[x].apply(lambda x: x if len(x) == digit[i] else (digit[i] - len(x)) * '0' + x)
ver = ver.apply(lambda x: int(''.join(x)), axis=1)
return ver
df.mediationProviderVersion = ver_convert_num(df.mediationProviderVersion)
rank = {102100: 0, 103100: 1, 103200: 2, 103400: 3, 103500: 4, 103600: 5, 103700: 6, 110000: 7, 110003: 8, 110004: 9, 110005: 10, 110007: 11, 110101: 12, 110106: 13, 110110: 14, 111000: 15, 111100: 16, 111102: 17, 111103: 18, 111104: 19, 111200: 20, 111305: 21, 112003: 22, 112100: 23, 113100: 24, 113200: 25, 113201: 26, 113300: 27, 114000: 28, 114100: 29, 114200: 30, 114201: 31, 114300: 32, 114304: 33, 114400: 34}
unranked = []
for x in df.mediationProviderVersion.unique():
if x not in rank.keys():
unranked.append(x)
t = list(rank.keys())
tem = [i for i in unranked if i < t[0]]
def interver(tem, border, index):
global rank
tem.sort()
if len(tem) > 0:
tem.append(border)
for i, c in enumerate(tem):
if c == border:
continue
rank[c] = index - (i + 1) / len(tem)
interver(tem, t[0], 0)
for x in range(0, len(t), 2):
if x + 1 == len(t):
continue
tem = [i for i in unranked if i < t[x + 1] and i > t[x]]
interver(tem, t[x + 1], x + 1)
tem = [i for i in unranked if i < t[x] and i > t[x - 1]]
interver(tem, t[x], x)
tem = [i for i in unranked if i > t[-1]]
tem.sort()
if len(tem) > 0:
h = rank[t[-1]] + 1
for i, c in enumerate(tem):
if i + 1 == len(tem):
rank[c] = h
continue
rank[c] = h - (i + 1) / len(tem)
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: rank[x])
df['os'] = df.osAndVersion.apply(lambda x: x.split('-')[0])
df['ver'] = df.osAndVersion.apply(lambda x: x.split('-')[1].split('.')[0])
os_rank = [{4: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 10: 6, 11: 7}, {10: 0, 11: 1, 12: 2, 13: 3, 14: 4, 15: 5, 16: 6}]
threshold = [3, 1]
for i, c in enumerate(df.os.unique()):
rank = os_rank[i]
for x in df.loc[df.os == c, 'ver'].unique():
if int(x) not in rank.keys():
g = rank.keys()
x = int(x)
rank[x] = min(g) - x if x < min(g) else rank[max(g)] + (x - max(g))
df.loc[df.os == c, 'ver'] = df.loc[df.os == c, 'ver'].apply(lambda y: rank[int(y)] + threshold[i])
del df['os']
df['ver'] = df['ver'].astype(np.float32)
import gc
gc.collect()
df
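# Map the two-letter country code to the country's 2019 Human Development Index, falling back to the median for unknown codes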
import pycountry
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_2
country = pd.read_csv('../input/country-name-hdi/csvData.csv')
s = country.hdi2019.median()
country['countryCode'] = country.country.apply(lambda x: countries.get(x, '0_0'))
country.loc[country.countryCode == '0_0', 'hdi2019'] = s
country.drop(['pop2022', 'country'], axis=1, inplace=True)
country.drop_duplicates(subset='countryCode', keep='first', inplace=True)
country.set_index('countryCode', inplace=True)
country = country.loc[:, 'hdi2019'].to_dict()
df.countryCode.fillna('0_0', inplace=True)
df['countryCode'] = df['countryCode'].apply(lambda x: country.get(x, s))
df['countryCode'] = df['countryCode'].astype(np.float32)
df | code |
106196473/cell_2 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
display(df.isnull().sum())
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
print(len(df))
df = agg_functions(df)
print(len(df))
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
display(df.isnull().sum()) | code |
106196473/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import gc
import numpy as np # linear algebra
import pandas as pd
import pycountry
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
df = agg_functions(df)
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
df['size'] = df['size'].apply(lambda x: x.split('x'))
df['size'] = df['size'].apply(lambda x: int(x[0]) * int(x[1]))
df['size'] = df['size'] * 0.0001
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: x.replace('-beta', '.') if 'beta' in x else x + '.0')
def ver_convert_num(ver):
ver = ver.str.split('.', expand=True)
digit = [2, 1, 1, 2]
for i, x in enumerate(ver.columns):
if int(ver[x].str.len().min()) == digit[i]:
continue
ver[x] = ver[x].apply(lambda x: x if len(x) == digit[i] else (digit[i] - len(x)) * '0' + x)
ver = ver.apply(lambda x: int(''.join(x)), axis=1)
return ver
df.mediationProviderVersion = ver_convert_num(df.mediationProviderVersion)
rank = {102100: 0, 103100: 1, 103200: 2, 103400: 3, 103500: 4, 103600: 5, 103700: 6, 110000: 7, 110003: 8, 110004: 9, 110005: 10, 110007: 11, 110101: 12, 110106: 13, 110110: 14, 111000: 15, 111100: 16, 111102: 17, 111103: 18, 111104: 19, 111200: 20, 111305: 21, 112003: 22, 112100: 23, 113100: 24, 113200: 25, 113201: 26, 113300: 27, 114000: 28, 114100: 29, 114200: 30, 114201: 31, 114300: 32, 114304: 33, 114400: 34}
unranked = []
for x in df.mediationProviderVersion.unique():
if x not in rank.keys():
unranked.append(x)
t = list(rank.keys())
tem = [i for i in unranked if i < t[0]]
def interver(tem, border, index):
global rank
tem.sort()
if len(tem) > 0:
tem.append(border)
for i, c in enumerate(tem):
if c == border:
continue
rank[c] = index - (i + 1) / len(tem)
interver(tem, t[0], 0)
for x in range(0, len(t), 2):
if x + 1 == len(t):
continue
tem = [i for i in unranked if i < t[x + 1] and i > t[x]]
interver(tem, t[x + 1], x + 1)
tem = [i for i in unranked if i < t[x] and i > t[x - 1]]
interver(tem, t[x], x)
tem = [i for i in unranked if i > t[-1]]
tem.sort()
if len(tem) > 0:
h = rank[t[-1]] + 1
for i, c in enumerate(tem):
if i + 1 == len(tem):
rank[c] = h
continue
rank[c] = h - (i + 1) / len(tem)
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: rank[x])
df['os'] = df.osAndVersion.apply(lambda x: x.split('-')[0])
df['ver'] = df.osAndVersion.apply(lambda x: x.split('-')[1].split('.')[0])
os_rank = [{4: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 10: 6, 11: 7}, {10: 0, 11: 1, 12: 2, 13: 3, 14: 4, 15: 5, 16: 6}]
threshold = [3, 1]
for i, c in enumerate(df.os.unique()):
rank = os_rank[i]
for x in df.loc[df.os == c, 'ver'].unique():
if int(x) not in rank.keys():
g = rank.keys()
x = int(x)
rank[x] = min(g) - x if x < min(g) else rank[max(g)] + (x - max(g))
df.loc[df.os == c, 'ver'] = df.loc[df.os == c, 'ver'].apply(lambda y: rank[int(y)] + threshold[i])
del df['os']
df['ver'] = df['ver'].astype(np.float32)
import gc
gc.collect()
df
import pycountry
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_2
country = pd.read_csv('../input/country-name-hdi/csvData.csv')
s = country.hdi2019.median()
country['countryCode'] = country.country.apply(lambda x: countries.get(x, '0_0'))
country.loc[country.countryCode == '0_0', 'hdi2019'] = s
country.drop(['pop2022', 'country'], axis=1, inplace=True)
country.drop_duplicates(subset='countryCode', keep='first', inplace=True)
country.set_index('countryCode', inplace=True)
country = country.loc[:, 'hdi2019'].to_dict()
df.countryCode.fillna('0_0', inplace=True)
df['countryCode'] = df['countryCode'].apply(lambda x: country.get(x, s))
df['countryCode'] = df['countryCode'].astype(np.float32)
df
from tqdm import tqdm
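# One-hot encode the categorical columns against a fixed vocabulary so every row maps onto the same dummy columns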
def preprocess(df1):
unique_dict = {'unitDisplayType': ['rewarded', 'interstitial', 'banner'], 'bundleId': ['com.tilegarden.match3', 'com.loop.match3d', 'com.tintash.nailsalon', '1502447854', 'se.ace.fishinc', 'com.kamilbilge.ropesavior3d', '1523081624', 'com.AppIdeas.LevelUpRunner', '1529614832', '1586795332', 'dovi.coalmining.inc', '1542256628', '1436213906', 'com.YayySAL.DodgeAgent', 'com.volt.dresstoimpress', '1579489488', '1582745578', '1569586264'], 'connectionType': ['3G', 'WIFI', 'UNKNOWN', '0_0'], 'c1': ['cb2', '7d3', '8bd', 'ad3', '59b', 'fdc', '7ca', '7b8', '9b5', 'dfa', '1ba', '2c1', 'ce2', '3db', '76e', '313', 'f8d', 'c2e', '82a', 'b22', 'f0f', '3dc', 'c17', '403', '707', '94f', 'cb3', '87b', '7be', 'd69', '00e', 'ea0', '397', '443', '466', '16a', '1c3', 'f99', 'f17', '05b', '2f6', '8f8', '49e', 'a65', '064', 'da9', 'd79', 'ab5', 'fd5', '541'], 'c3': ['6b', '79', '4b', '4e'], 'c2': ['6.0', '3.0', '9.0', '7.0', '8.0', '5.0', '2.0', '1.0', '4.0']}
def unique_one_hot(df2, name):
df2[name] = df2[name].astype(str)
df2[name].fillna('0_0', inplace=True)
df2[name] = df2[name].str.replace('nan', '0_0')
unique = unique_dict[name]
dummies = {x: y for x, y in zip(unique, pd.get_dummies(unique, dtype=np.uint8).values)}
one_hot = pd.DataFrame(columns=[f'{name}_{i}' for i, x in enumerate(unique)])
print(name)
one_hot[[f'{name}_{i}' for i, x in enumerate(unique)]] = np.array([dummies[x] for x in df2[name]])
one_hot = one_hot.astype({f'{name}_{i}': np.uint8 for i, x in enumerate(unique)})
df2 = df2.join(one_hot)
return df2
categorical = ['unitDisplayType', 'connectionType', 'bundleId', 'c3', 'c1']
for cat in categorical:
df1 = unique_one_hot(df1, cat)
df1.drop(categorical, axis=1, inplace=True)
gc.collect()
return df1
df = preprocess(df) | code |
106196473/cell_10 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import gc
import numpy as np # linear algebra
import pandas as pd
import pycountry
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
df = agg_functions(df)
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
df['size'] = df['size'].apply(lambda x: x.split('x'))
df['size'] = df['size'].apply(lambda x: int(x[0]) * int(x[1]))
df['size'] = df['size'] * 0.0001
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: x.replace('-beta', '.') if 'beta' in x else x + '.0')
def ver_convert_num(ver):
ver = ver.str.split('.', expand=True)
digit = [2, 1, 1, 2]
for i, x in enumerate(ver.columns):
if int(ver[x].str.len().min()) == digit[i]:
continue
ver[x] = ver[x].apply(lambda x: x if len(x) == digit[i] else (digit[i] - len(x)) * '0' + x)
ver = ver.apply(lambda x: int(''.join(x)), axis=1)
return ver
df.mediationProviderVersion = ver_convert_num(df.mediationProviderVersion)
rank = {102100: 0, 103100: 1, 103200: 2, 103400: 3, 103500: 4, 103600: 5, 103700: 6, 110000: 7, 110003: 8, 110004: 9, 110005: 10, 110007: 11, 110101: 12, 110106: 13, 110110: 14, 111000: 15, 111100: 16, 111102: 17, 111103: 18, 111104: 19, 111200: 20, 111305: 21, 112003: 22, 112100: 23, 113100: 24, 113200: 25, 113201: 26, 113300: 27, 114000: 28, 114100: 29, 114200: 30, 114201: 31, 114300: 32, 114304: 33, 114400: 34}
unranked = []
for x in df.mediationProviderVersion.unique():
if x not in rank.keys():
unranked.append(x)
t = list(rank.keys())
tem = [i for i in unranked if i < t[0]]
def interver(tem, border, index):
global rank
tem.sort()
if len(tem) > 0:
tem.append(border)
for i, c in enumerate(tem):
if c == border:
continue
rank[c] = index - (i + 1) / len(tem)
interver(tem, t[0], 0)
for x in range(0, len(t), 2):
if x + 1 == len(t):
continue
tem = [i for i in unranked if i < t[x + 1] and i > t[x]]
interver(tem, t[x + 1], x + 1)
tem = [i for i in unranked if i < t[x] and i > t[x - 1]]
interver(tem, t[x], x)
tem = [i for i in unranked if i > t[-1]]
tem.sort()
if len(tem) > 0:
h = rank[t[-1]] + 1
for i, c in enumerate(tem):
if i + 1 == len(tem):
rank[c] = h
continue
rank[c] = h - (i + 1) / len(tem)
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: rank[x])
df['os'] = df.osAndVersion.apply(lambda x: x.split('-')[0])
df['ver'] = df.osAndVersion.apply(lambda x: x.split('-')[1].split('.')[0])
os_rank = [{4: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 10: 6, 11: 7}, {10: 0, 11: 1, 12: 2, 13: 3, 14: 4, 15: 5, 16: 6}]
threshold = [3, 1]
for i, c in enumerate(df.os.unique()):
rank = os_rank[i]
for x in df.loc[df.os == c, 'ver'].unique():
if int(x) not in rank.keys():
g = rank.keys()
x = int(x)
rank[x] = min(g) - x if x < min(g) else rank[max(g)] + (x - max(g))
df.loc[df.os == c, 'ver'] = df.loc[df.os == c, 'ver'].apply(lambda y: rank[int(y)] + threshold[i])
del df['os']
df['ver'] = df['ver'].astype(np.float32)
import gc
gc.collect()
df
import pycountry
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_2
country = pd.read_csv('../input/country-name-hdi/csvData.csv')
s = country.hdi2019.median()
country['countryCode'] = country.country.apply(lambda x: countries.get(x, '0_0'))
country.loc[country.countryCode == '0_0', 'hdi2019'] = s
country.drop(['pop2022', 'country'], axis=1, inplace=True)
country.drop_duplicates(subset='countryCode', keep='first', inplace=True)
country.set_index('countryCode', inplace=True)
country = country.loc[:, 'hdi2019'].to_dict()
df.countryCode.fillna('0_0', inplace=True)
df['countryCode'] = df['countryCode'].apply(lambda x: country.get(x, s))
df['countryCode'] = df['countryCode'].astype(np.float32)
df
from tqdm import tqdm
def preprocess(df1):
unique_dict = {'unitDisplayType': ['rewarded', 'interstitial', 'banner'], 'bundleId': ['com.tilegarden.match3', 'com.loop.match3d', 'com.tintash.nailsalon', '1502447854', 'se.ace.fishinc', 'com.kamilbilge.ropesavior3d', '1523081624', 'com.AppIdeas.LevelUpRunner', '1529614832', '1586795332', 'dovi.coalmining.inc', '1542256628', '1436213906', 'com.YayySAL.DodgeAgent', 'com.volt.dresstoimpress', '1579489488', '1582745578', '1569586264'], 'connectionType': ['3G', 'WIFI', 'UNKNOWN', '0_0'], 'c1': ['cb2', '7d3', '8bd', 'ad3', '59b', 'fdc', '7ca', '7b8', '9b5', 'dfa', '1ba', '2c1', 'ce2', '3db', '76e', '313', 'f8d', 'c2e', '82a', 'b22', 'f0f', '3dc', 'c17', '403', '707', '94f', 'cb3', '87b', '7be', 'd69', '00e', 'ea0', '397', '443', '466', '16a', '1c3', 'f99', 'f17', '05b', '2f6', '8f8', '49e', 'a65', '064', 'da9', 'd79', 'ab5', 'fd5', '541'], 'c3': ['6b', '79', '4b', '4e'], 'c2': ['6.0', '3.0', '9.0', '7.0', '8.0', '5.0', '2.0', '1.0', '4.0']}
def unique_one_hot(df2, name):
df2[name] = df2[name].astype(str)
df2[name].fillna('0_0', inplace=True)
df2[name] = df2[name].str.replace('nan', '0_0')
unique = unique_dict[name]
dummies = {x: y for x, y in zip(unique, pd.get_dummies(unique, dtype=np.uint8).values)}
one_hot = pd.DataFrame(columns=[f'{name}_{i}' for i, x in enumerate(unique)])
one_hot[[f'{name}_{i}' for i, x in enumerate(unique)]] = np.array([dummies[x] for x in df2[name]])
one_hot = one_hot.astype({f'{name}_{i}': np.uint8 for i, x in enumerate(unique)})
df2 = df2.join(one_hot)
return df2
categorical = ['unitDisplayType', 'connectionType', 'bundleId', 'c3', 'c1']
for cat in categorical:
df1 = unique_one_hot(df1, cat)
df1.drop(categorical, axis=1, inplace=True)
gc.collect()
return df1
df = preprocess(df)
df | code |
106196473/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import gc
import numpy as np # linear algebra
import pandas as pd
import pycountry
import tensorflow as tf
import numpy as np
import pandas as pd
import gc
train = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/train_data.csv')
df = pd.read_csv('../input/digital-turbine-auction-bid-price-prediction/test_data.csv')
def agg_functions(df1):
features = ['c1', 'deviceId', 'unitDisplayType', 'bundleId']
global train
for x in features:
t = train.groupby(x)['winBid'].agg(['std', 'max', 'min', 'mean', 'median', 'var', 'sem'])
t.columns = [x + c + '_bidWin' for c in t.columns]
t = t.astype({c: np.float32 for c in t.columns})
t.reset_index(inplace=True)
df1 = df1.merge(t, on=x, how='left')
gc.collect()
return df1
df = agg_functions(df)
train = agg_functions(train)
s = train.deviceIdstd_bidWin.mean()
df['deviceIdstd_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmin_bidWin.mean()
df['deviceIdmin_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmax_bidWin.mean()
df['deviceIdmax_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmean_bidWin.mean()
df['deviceIdmean_bidWin'].fillna(s, inplace=True)
s = train.deviceIdmedian_bidWin.mean()
df['deviceIdmedian_bidWin'].fillna(s, inplace=True)
s = train.deviceIdvar_bidWin.mean()
df['deviceIdvar_bidWin'].fillna(s, inplace=True)
s = train.deviceIdsem_bidWin.mean()
df['deviceIdsem_bidWin'].fillna(s, inplace=True)
del train
gc.collect()
cols = ['unitDisplayTypemin_bidWin']
df.drop(cols, axis=1, inplace=True)
df['size'] = df['size'].apply(lambda x: x.split('x'))
df['size'] = df['size'].apply(lambda x: int(x[0]) * int(x[1]))
df['size'] = df['size'] * 0.0001
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: x.replace('-beta', '.') if 'beta' in x else x + '.0')
def ver_convert_num(ver):
ver = ver.str.split('.', expand=True)
digit = [2, 1, 1, 2]
for i, x in enumerate(ver.columns):
if int(ver[x].str.len().min()) == digit[i]:
continue
ver[x] = ver[x].apply(lambda x: x if len(x) == digit[i] else (digit[i] - len(x)) * '0' + x)
ver = ver.apply(lambda x: int(''.join(x)), axis=1)
return ver
df.mediationProviderVersion = ver_convert_num(df.mediationProviderVersion)
rank = {102100: 0, 103100: 1, 103200: 2, 103400: 3, 103500: 4, 103600: 5, 103700: 6, 110000: 7, 110003: 8, 110004: 9, 110005: 10, 110007: 11, 110101: 12, 110106: 13, 110110: 14, 111000: 15, 111100: 16, 111102: 17, 111103: 18, 111104: 19, 111200: 20, 111305: 21, 112003: 22, 112100: 23, 113100: 24, 113200: 25, 113201: 26, 113300: 27, 114000: 28, 114100: 29, 114200: 30, 114201: 31, 114300: 32, 114304: 33, 114400: 34}
unranked = []
for x in df.mediationProviderVersion.unique():
if x not in rank.keys():
unranked.append(x)
t = list(rank.keys())
tem = [i for i in unranked if i < t[0]]
def interver(tem, border, index):
global rank
tem.sort()
if len(tem) > 0:
tem.append(border)
for i, c in enumerate(tem):
if c == border:
continue
rank[c] = index - (i + 1) / len(tem)
interver(tem, t[0], 0)
for x in range(0, len(t), 2):
if x + 1 == len(t):
continue
tem = [i for i in unranked if i < t[x + 1] and i > t[x]]
interver(tem, t[x + 1], x + 1)
tem = [i for i in unranked if i < t[x] and i > t[x - 1]]
interver(tem, t[x], x)
tem = [i for i in unranked if i > t[-1]]
tem.sort()
if len(tem) > 0:
h = rank[t[-1]] + 1
for i, c in enumerate(tem):
if i + 1 == len(tem):
rank[c] = h
continue
rank[c] = h - (i + 1) / len(tem)
df.mediationProviderVersion = df.mediationProviderVersion.apply(lambda x: rank[x])
df['os'] = df.osAndVersion.apply(lambda x: x.split('-')[0])
df['ver'] = df.osAndVersion.apply(lambda x: x.split('-')[1].split('.')[0])
os_rank = [{4: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 10: 6, 11: 7}, {10: 0, 11: 1, 12: 2, 13: 3, 14: 4, 15: 5, 16: 6}]
threshold = [3, 1]
for i, c in enumerate(df.os.unique()):
rank = os_rank[i]
for x in df.loc[df.os == c, 'ver'].unique():
if int(x) not in rank.keys():
g = rank.keys()
x = int(x)
rank[x] = min(g) - x if x < min(g) else rank[max(g)] + (x - max(g))
df.loc[df.os == c, 'ver'] = df.loc[df.os == c, 'ver'].apply(lambda y: rank[int(y)] + threshold[i])
del df['os']
df['ver'] = df['ver'].astype(np.float32)
import gc
gc.collect()
df
import pycountry
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_2
country = pd.read_csv('../input/country-name-hdi/csvData.csv')
s = country.hdi2019.median()
country['countryCode'] = country.country.apply(lambda x: countries.get(x, '0_0'))
country.loc[country.countryCode == '0_0', 'hdi2019'] = s
country.drop(['pop2022', 'country'], axis=1, inplace=True)
country.drop_duplicates(subset='countryCode', keep='first', inplace=True)
country.set_index('countryCode', inplace=True)
country = country.loc[:, 'hdi2019'].to_dict()
df.countryCode.fillna('0_0', inplace=True)
df['countryCode'] = df['countryCode'].apply(lambda x: country.get(x, s))
df['countryCode'] = df['countryCode'].astype(np.float32)
df
from tqdm import tqdm
def preprocess(df1):
unique_dict = {'unitDisplayType': ['rewarded', 'interstitial', 'banner'], 'bundleId': ['com.tilegarden.match3', 'com.loop.match3d', 'com.tintash.nailsalon', '1502447854', 'se.ace.fishinc', 'com.kamilbilge.ropesavior3d', '1523081624', 'com.AppIdeas.LevelUpRunner', '1529614832', '1586795332', 'dovi.coalmining.inc', '1542256628', '1436213906', 'com.YayySAL.DodgeAgent', 'com.volt.dresstoimpress', '1579489488', '1582745578', '1569586264'], 'connectionType': ['3G', 'WIFI', 'UNKNOWN', '0_0'], 'c1': ['cb2', '7d3', '8bd', 'ad3', '59b', 'fdc', '7ca', '7b8', '9b5', 'dfa', '1ba', '2c1', 'ce2', '3db', '76e', '313', 'f8d', 'c2e', '82a', 'b22', 'f0f', '3dc', 'c17', '403', '707', '94f', 'cb3', '87b', '7be', 'd69', '00e', 'ea0', '397', '443', '466', '16a', '1c3', 'f99', 'f17', '05b', '2f6', '8f8', '49e', 'a65', '064', 'da9', 'd79', 'ab5', 'fd5', '541'], 'c3': ['6b', '79', '4b', '4e'], 'c2': ['6.0', '3.0', '9.0', '7.0', '8.0', '5.0', '2.0', '1.0', '4.0']}
def unique_one_hot(df2, name):
df2[name] = df2[name].astype(str)
df2[name].fillna('0_0', inplace=True)
df2[name] = df2[name].str.replace('nan', '0_0')
unique = unique_dict[name]
dummies = {x: y for x, y in zip(unique, pd.get_dummies(unique, dtype=np.uint8).values)}
one_hot = pd.DataFrame(columns=[f'{name}_{i}' for i, x in enumerate(unique)])
one_hot[[f'{name}_{i}' for i, x in enumerate(unique)]] = np.array([dummies[x] for x in df2[name]])
one_hot = one_hot.astype({f'{name}_{i}': np.uint8 for i, x in enumerate(unique)})
df2 = df2.join(one_hot)
return df2
categorical = ['unitDisplayType', 'connectionType', 'bundleId', 'c3', 'c1']
for cat in categorical:
df1 = unique_one_hot(df1, cat)
df1.drop(categorical, axis=1, inplace=True)
gc.collect()
return df1
df = preprocess(df)
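# Load the pretrained Keras regressor, drop the non-numeric identifier columns, and predict winBid for the test rows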
import tensorflow as tf
dc = ['eventTimestamp', 'brandName', 'appVersion', 'correctModelName', 'deviceId', 'osAndVersion']
model = tf.keras.models.load_model('../input/eda-item-price/train_76.408043.h5')
out = model.predict(df.drop(dc, axis=1), batch_size=512)
df.to_csv('submission.csv', index=False) | code |
1009481/cell_13 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import datetime
import numpy as np
import re
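# Heuristic: derive a chunk size for incremental training by repeatedly dividing the dataset length by small primes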
def calc_len_partial(X_train, limit=15):
i = 1
partial_len = len(X_train)
div_len = 0
while i:
if partial_len % 2:
partial_len = len(X_train) / 2
div_len += 2
elif partial_len % 3:
partial_len = len(X_train) / 3
div_len += 3
elif partial_len % 5:
partial_len = len(X_train) / 5
div_len += 5
elif partial_len % 7:
partial_len = len(X_train) / 7
div_len += 7
elif partial_len % 11:
partial_len = len(X_train) / 11
div_len += 11
else:
break
if div_len > limit:
break
return len(X_train) / div_len
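# Strip HTML tags and non-word characters, lowercase, and trim whitespace before hashing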
def preprocessor(text):
text = str(text)
text = re.sub('<[^>]*>', '', text)
text = re.sub('[\\W]+', ' ', text.lower())
text = text.rstrip().lstrip()
return text
stop = stopwords.words('english')
vect = HashingVectorizer(decode_error='ignore', n_features=2 ** 21, preprocessor=preprocessor, stop_words=stop, ngram_range=(1, 3))
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
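# Stream the training texts through the hashing vectorizer in chunks and update the logistic-loss SGD model incrementally; X_train/y_train are prepared in an earlier cell of this notebook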
len_partial = int(calc_len_partial(X_train=X_train, limit=20))
classes = np.array([0, 1, 2])
for i in range(round(len(X_train) / len_partial)):
X_train_ml = X_train[i:len_partial * (i + 1)]
y_train_ml = y_train[i:len_partial * (i + 1)]
X_train_ml = vect.transform(X_train_ml)
clf.partial_fit(X_train_ml, y_train_ml, classes=classes)
X_test_ml = vect.transform(X_test)
print('Testing accuracy: {:.3f}%'.format(clf.score(X_test_ml, y_test) * 100))
clf = clf.partial_fit(X_test_ml, y_test) | code |
1009481/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.cross_validation import train_test_split
from nltk.corpus import stopwords
from subprocess import check_output
import datetime
import re
print(check_output(['ls', '../input']).decode('utf8')) | code |
1009481/cell_11 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import datetime
import numpy as np
import re
def calc_len_partial(X_train, limit=15):
i = 1
partial_len = len(X_train)
div_len = 0
while i:
if partial_len % 2:
partial_len = len(X_train) / 2
div_len += 2
elif partial_len % 3:
partial_len = len(X_train) / 3
div_len += 3
elif partial_len % 5:
partial_len = len(X_train) / 5
div_len += 5
elif partial_len % 7:
partial_len = len(X_train) / 7
div_len += 7
elif partial_len % 11:
partial_len = len(X_train) / 11
div_len += 11
else:
break
if div_len > limit:
break
return len(X_train) / div_len
def preprocessor(text):
text = str(text)
text = re.sub('<[^>]*>', '', text)
text = re.sub('[\\W]+', ' ', text.lower())
text = text.rstrip().lstrip()
return text
stop = stopwords.words('english')
vect = HashingVectorizer(decode_error='ignore', n_features=2 ** 21, preprocessor=preprocessor, stop_words=stop, ngram_range=(1, 3))
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
print(datetime.datetime.now())
len_partial = int(calc_len_partial(X_train=X_train, limit=20))
classes = np.array([0, 1, 2])
for i in range(round(len(X_train) / len_partial)):
X_train_ml = X_train[i:len_partial * (i + 1)]
y_train_ml = y_train[i:len_partial * (i + 1)]
X_train_ml = vect.transform(X_train_ml)
clf.partial_fit(X_train_ml, y_train_ml, classes=classes)
print(datetime.datetime.now()) | code |
1009481/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_json('../input/train.json')
df[1:3] | code |
1009481/cell_12 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import datetime
import numpy as np
import re
def calc_len_partial(X_train, limit=15):
i = 1
partial_len = len(X_train)
div_len = 0
while i:
if partial_len % 2:
partial_len = len(X_train) / 2
div_len += 2
elif partial_len % 3:
partial_len = len(X_train) / 3
div_len += 3
elif partial_len % 5:
partial_len = len(X_train) / 5
div_len += 5
elif partial_len % 7:
partial_len = len(X_train) / 7
div_len += 7
elif partial_len % 11:
partial_len = len(X_train) / 11
div_len += 11
else:
break
if div_len > limit:
break
return len(X_train) / div_len
def preprocessor(text):
text = str(text)
text = re.sub('<[^>]*>', '', text)
text = re.sub('[\\W]+', ' ', text.lower())
text = text.rstrip().lstrip()
return text
stop = stopwords.words('english')
vect = HashingVectorizer(decode_error='ignore', n_features=2 ** 21, preprocessor=preprocessor, stop_words=stop, ngram_range=(1, 3))
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
len_partial = int(calc_len_partial(X_train=X_train, limit=20))
classes = np.array([0, 1, 2])
for i in range(round(len(X_train) / len_partial)):
X_train_ml = X_train[i:len_partial * (i + 1)]
y_train_ml = y_train[i:len_partial * (i + 1)]
X_train_ml = vect.transform(X_train_ml)
clf.partial_fit(X_train_ml, y_train_ml, classes=classes)
print('Training accuracy: {:.3f}%'.format(clf.score(vect.transform(X_train), y_train) * 100)) | code |
122250162/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer(as_frame=True)
breast_cancer = load_breast_cancer()
breast_cancer.DESCR
breast_cancer.feature_names
breast_cancer.target_names
breast_cancer.data
breast_cancer.target | code |
122250162/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer(as_frame=True)
breast_cancer = load_breast_cancer()
breast_cancer.DESCR | code |
122250162/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer(as_frame=True)
breast_cancer = load_breast_cancer()
breast_cancer.DESCR
breast_cancer.feature_names
breast_cancer.target_names | code |
122250162/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer(as_frame=True)
breast_cancer = load_breast_cancer()
breast_cancer.DESCR
breast_cancer.feature_names | code |
122250162/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer(as_frame=True)
breast_cancer = load_breast_cancer()
breast_cancer.DESCR
breast_cancer.feature_names
breast_cancer.target_names
breast_cancer.data | code |
329301/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
329301/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.grid_search import GridSearchCV
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.label.values.ravel()
X_train = train.values[:, 1:]
X_test = test.values
def normalized_data():
X = np.concatenate((X_train, X_test))
x_test = X_test - X.mean(0)
x_train = X_train - X.mean(0)
x_test /= x_test.std(1).reshape((-1, 1))
x_train /= x_train.std(1).reshape((-1, 1))
return (x_train, x_test)
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
def testCLF(clf, parameters, X):
search = GridSearchCV(clf, parameters)
search.fit(X, y_train)
print(search.score)
return search.best_estimator_ | code |
329301/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.decomposition import PCA, RandomizedPCA
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train.label.values.ravel()
X_train = train.values[:, 1:]
X_test = test.values
def show(array):
array = array.reshape(28, 28)
def plot_gallery(images, n_row=2, n_col=5):
h = w = 28
'Helper function to plot a gallery of portraits'
for i in range(n_row * n_col):
plt.xticks(())
plt.yticks(())
def normalized_data():
X = np.concatenate((X_train, X_test))
x_test = X_test - X.mean(0)
x_train = X_train - X.mean(0)
x_test /= x_test.std(1).reshape((-1, 1))
x_train /= x_train.std(1).reshape((-1, 1))
return (x_train, x_test)
from sklearn.decomposition import PCA, RandomizedPCA
def components(n_components, random=False, show_components=True):
x_train, x_test = normalized_data()
pca = PCA(n_components=n_components)
if random:
pca = RandomizedPCA(n_components=n_components)
pca.fit(x_train)
components = pca.components_
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
return (x_train, x_test)
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
def testCLF(clf, parameters, X):
search = GridSearchCV(clf, parameters)
search.fit(X, y_train)
return search.best_estimator_
clf = SVC()
parameters = {'C': [0.5, 1.0], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'], 'degree': [2, 3]}
X = components(10)[0]
testCLF(clf, parameters, X) | code |