path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
---|---|---|---|
48163903/cell_5 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import pandas as pd
import re
df_train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df_test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
df_sub = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
def preprocessing(df):
derlem = []
for i in range(len(df.text)):
text = re.sub('https?://\\S+', '', df.text[i])
text = re.sub('http?://\\S+', '', text)
text = re.sub('[^a-zA-Z]', ' ', text)
text = re.sub('\\n', ' ', text)
text = re.sub('\\s+', ' ', text).strip()
text = text.lower()
text = text.split()
text = [WordNetLemmatizer().lemmatize(kelime) for kelime in text if kelime not in set(stopwords.words('english'))]
text = ' '.join(text)
derlem.append(text)
df['clean_text'] = derlem
return df
df_test = preprocessing(df_test)
df_train = preprocessing(df_train)
print(df_train.text[417])
print(df_train.clean_text[417]) | code |
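A note on the cell above: the lemmatizer and the English stopword set are rebuilt for every tweet, and the WordNet and stopwords corpora must already be installed. A minimal sketch (an assumption about setup, not the notebook's code) that hoists both out of the loop:

```python
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

nltk.download('stopwords')   # assumed to be needed on a fresh environment
nltk.download('wordnet')

lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))

def clean_text(text):
    # Drop URLs, keep letters only, collapse whitespace, lower-case, lemmatize.
    text = re.sub(r'https?://\S+', '', text)
    text = re.sub(r'[^a-zA-Z]', ' ', text)
    words = text.lower().split()
    return ' '.join(lemmatizer.lemmatize(w) for w in words if w not in stop_words)

# df_train['clean_text'] = df_train.text.apply(clean_text)
```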
34119268/cell_21 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
(len(data.classes), data.c)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4) | code |
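The 34119268 cells call `path.ls()`, `get_transforms`, `ImageDataBunch.from_name_re`, `cnn_learner`, `models`, `error_rate` and `ClassificationInterpretation` without importing them; they appear to assume fastai v1's star import, roughly:

```python
# Assumed setup for the 34119268 cells (fastai v1 API; the import cell is not shown):
from fastai.vision import *   # provides get_transforms, ImageDataBunch, cnn_learner,
                              # models, error_rate and the Path.ls() convenience method
```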
34119268/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls() | code |
34119268/cell_25 | [
"image_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
(len(data.classes), data.c)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
learn.recorder.plot() | code |
34119268/cell_23 | [
"text_html_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
(len(data.classes), data.c)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(9, figsize=(15, 11)) | code |
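Assuming the `interp` object built above, fastai v1 also exposes a confusion matrix and a "most confused" summary, a common follow-up to `plot_top_losses`:

```python
# Optional follow-up on the ClassificationInterpretation above (fastai v1):
interp.plot_confusion_matrix(figsize=(6, 6), dpi=80)   # counts per (actual, predicted) class
interp.most_confused(min_val=2)                        # list of (actual, predicted, count)
```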
34119268/cell_20 | [
"image_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
(len(data.classes), data.c)
learn = cnn_learner(data, models.resnet34, metrics=error_rate) | code |
34119268/cell_6 | [
"image_output_1.png"
] | from pathlib import Path
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls() | code |
34119268/cell_16 | [
"text_plain_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
data.show_batch(rows=3, figsize=(7, 6)) | code |
34119268/cell_17 | [
"text_plain_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
print(data.classes)
(len(data.classes), data.c) | code |
34119268/cell_14 | [
"text_plain_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes | code |
34119268/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
bs = 16
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths
tfms = get_transforms(do_flip=False)
pat = '/([^/]*)/[^/]*.jpg$'
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=224, bs=bs)
data.classes
(len(data.classes), data.c)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
interp = ClassificationInterpretation.from_learner(learn) | code |
34119268/cell_10 | [
"text_plain_output_1.png"
] | from pathlib import Path
from pathlib import Path
path = Path('../input/earphones/earphone_dataset')
path.ls()
mi = path / 'redmi_airdots'
galaxy = path / 'galaxy_buds'
airpods = path / 'iphone_airpods'
mi.ls()
fn_paths = []
fn_paths = fn_paths + mi.ls() + galaxy.ls() + airpods.ls()
fn_paths | code |
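The regex `pat = '/([^/]*)/[^/]*.jpg$'` used with `ImageDataBunch.from_name_re` in the other 34119268 cells labels each image by its parent folder. An illustration with plain `re` (the file path below is hypothetical):

```python
import re

pat = r'/([^/]*)/[^/]*\.jpg$'   # group 1 = parent folder = class label
example = '../input/earphones/earphone_dataset/galaxy_buds/img_001.jpg'  # hypothetical path
print(re.search(pat, example).group(1))   # -> 'galaxy_buds'
```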
1003611/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | data = pd.read_csv('../input/DSL-StrongPasswordData.csv', header=0)
data = data.reset_index()
hold_cols = [x for x in data.columns if x.startswith('H.')]
switch_cols = [x for x in data.columns if x.startswith('UD.')]
timing_cols = [x for x in data.columns if x.startswith('H.') or x.startswith('UD.')]
def get_subject_data(*subjects, include_subject=False):
""" Returns the timing information for each trial of the given user
The delays alternate between key hold time and delay to the next key
"""
cols = timing_cols + (['subject'] if include_subject else [])
return data.loc[data.subject.isin(subjects), cols]
s24 = get_subject_data('s002', 's004', include_subject=True)
def plot_comparison(*subjects):
data = get_subject_data(*subjects, include_subject=True)
fig = plt.figure(figsize=(12, 12))
for i, delay in enumerate(timing_cols):
ax = fig.add_subplot(5, 5, i + 1)
for u in subjects:
_ = data.loc[data.subject == u].hist(delay, ax=ax, alpha=0.75, label=u)
return fig
_ = plot_comparison('s052', 's036', 's002') | code |
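None of the 1003611 cells shown import pandas or matplotlib, although they call `pd.read_csv` and `plt.figure`; the imports they appear to assume (conventional names) are:

```python
# Assumed imports for the 1003611 cells (not present in any of the cells shown):
import pandas as pd
import matplotlib.pyplot as plt
```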
1003611/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | data = pd.read_csv('../input/DSL-StrongPasswordData.csv', header=0)
data = data.reset_index()
hold_cols = [x for x in data.columns if x.startswith('H.')]
switch_cols = [x for x in data.columns if x.startswith('UD.')]
timing_cols = [x for x in data.columns if x.startswith('H.') or x.startswith('UD.')]
def get_subject_data(*subjects, include_subject=False):
""" Returns the timing information for each trial of the given user
The delays alternate between key hold time and delay to the next key
"""
cols = timing_cols + (['subject'] if include_subject else [])
return data.loc[data.subject.isin(subjects), cols]
s24 = get_subject_data('s002', 's004', include_subject=True)
s24.head() | code |
1003611/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | data = pd.read_csv('../input/DSL-StrongPasswordData.csv', header=0)
data = data.reset_index()
data.head() | code |
1003611/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
from pandas.plotting import scatter_matrix
data = pd.read_csv('../input/DSL-StrongPasswordData.csv', header=0)
data = data.reset_index()
hold_cols = [x for x in data.columns if x.startswith('H.')]
switch_cols = [x for x in data.columns if x.startswith('UD.')]
timing_cols = [x for x in data.columns if x.startswith('H.') or x.startswith('UD.')]
def get_subject_data(*subjects, include_subject=False):
""" Returns the timing information for each trial of the given user
The delays alternate between key hold time and delay to the next key
"""
cols = timing_cols + (['subject'] if include_subject else [])
return data.loc[data.subject.isin(subjects), cols]
s24 = get_subject_data('s002', 's004', include_subject=True)
from pandas.plotting import scatter_matrix
_ = scatter_matrix(get_subject_data('s052').loc[:, switch_cols], alpha=0.2, figsize=(12, 12), diagonal='kde') | code |
1003611/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
from pandas.plotting import scatter_matrix
from pandas.plotting import scatter_matrix
data = pd.read_csv('../input/DSL-StrongPasswordData.csv', header=0)
data = data.reset_index()
hold_cols = [x for x in data.columns if x.startswith('H.')]
switch_cols = [x for x in data.columns if x.startswith('UD.')]
timing_cols = [x for x in data.columns if x.startswith('H.') or x.startswith('UD.')]
def get_subject_data(*subjects, include_subject=False):
""" Returns the timing information for each trial of the given user
The delays alternate between key hold time and delay to the next key
"""
cols = timing_cols + (['subject'] if include_subject else [])
return data.loc[data.subject.isin(subjects), cols]
s24 = get_subject_data('s002', 's004', include_subject=True)
from pandas.plotting import scatter_matrix
_ = scatter_matrix(get_subject_data('s052').loc[:, switch_cols], alpha=0.2, figsize=(12, 12), diagonal='kde')
_ = scatter_matrix(get_subject_data('s052').loc[:, hold_cols], alpha=0.2, figsize=(12, 12), diagonal='kde') | code |
72098732/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/kk30ml/train.csv', index_col=0)
test = pd.read_csv('../input/kk30ml/test.csv', index_col=0)
train
y = train['target']
X = train.drop(['target'], axis=1)
X.head() | code |
72098732/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/kk30ml/train.csv', index_col=0)
test = pd.read_csv('../input/kk30ml/test.csv', index_col=0)
train | code |
72098732/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72098732/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
model = RandomForestRegressor()
model.fit(X_train, y_train)
pred = model.predict(X_valid)
rmse = mean_squared_error(y_valid, pred, squared=False)
print(rmse) | code |
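cell_7 fits on `X_train`/`y_train` and scores on `X_valid`/`y_valid`, but the cell that creates the split is not among those shown. A plausible sketch (variable names follow cell_7, the ratio and seed are guesses, and `X_Tr` is the ordinal-encoded frame built in cell_5):

```python
# Hypothetical split assumed by cell_7; test_size and random_state are guesses.
from sklearn.model_selection import train_test_split

X_train, X_valid, y_train, y_valid = train_test_split(X_Tr, y, test_size=0.2, random_state=0)
```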
72098732/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/kk30ml/train.csv', index_col=0)
test = pd.read_csv('../input/kk30ml/test.csv', index_col=0)
train
test.head() | code |
72098732/cell_5 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/kk30ml/train.csv', index_col=0)
test = pd.read_csv('../input/kk30ml/test.csv', index_col=0)
train
y = train['target']
X = train.drop(['target'], axis=1)
from sklearn.preprocessing import OrdinalEncoder
cat = X.dtypes == 'object'
cat_l = list(cat[cat].index)
X_Tr = X.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X_Tr[cat_l] = ordinal_encoder.fit_transform(X[cat_l])
X_test[cat_l] = ordinal_encoder.transform(X_test[cat_l])
X_Tr.head() | code |
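`ordinal_encoder.transform(X_test[cat_l])` raises if the test set contains a category that never appeared in training; scikit-learn 0.24+ can map unknowns to a sentinel instead. An optional hardening of the cell above:

```python
# Same encoding, but unseen test-set categories become -1 instead of raising
# (requires scikit-learn >= 0.24).
from sklearn.preprocessing import OrdinalEncoder

ordinal_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
X_Tr[cat_l] = ordinal_encoder.fit_transform(X[cat_l])
X_test[cat_l] = ordinal_encoder.transform(X_test[cat_l])
```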
2004802/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
plt.figure(figsize=(15, 8))
sns.set_style('whitegrid')
ax = sns.countplot(x='Title', data=full_set)
ax.set_ylabel('COUNT', size=20, color='black', alpha=0.5)
ax.set_xlabel('TITLE', size=20, color='black', alpha=0.5)
ax.set_title('COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION', size=20, color='black', alpha=0.5) | code |
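The 2004802 cells plot `Title` and group by `FamilyMembers`, but the cell that derives those columns from `Name`, `SibSp` and `Parch` is not included here. The conventional derivation looks roughly like this (an assumption, not the author's code):

```python
# Assumed feature-engineering step, kept consistent with the 'Mr.', 'Mlle.' style
# values the later cells filter on.
full_set['Title'] = full_set['Name'].str.extract(r' ([A-Za-z]+\.)', expand=False)
full_set['FamilyMembers'] = full_set['SibSp'] + full_set['Parch'] + 1
```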
2004802/cell_34 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
plt.figure(figsize=(15,8))
sns.set_style("whitegrid")
ax=sns.countplot(x="Title", data=full_set)
ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5)
ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5)
ax.set_title("COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION",size = 20,color="black",alpha=0.5)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
plt.figure(figsize=(15,8))
sns.set_style("whitegrid")
ax=sns.countplot(x="Title", data=full_set)
ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5)
ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5)
ax.set_title("COUNT OF TITLES IN EACH CATEGORY AFTER COMBINATION",size = 20,color="black",alpha=0.5)
family_size_survival = full_set[['FamilyMembers', 'Survived']].groupby(['FamilyMembers'], as_index=False).count().sort_values(by='Survived', ascending=False)
plt.figure(figsize=(15, 8))
sns.set_style('whitegrid')
ax = sns.barplot(x='FamilyMembers', y='Survived', data=family_size_survival)
ax.set_title('SURVIVED PASSENGER COUNT BASED ON FAMILY SIZE', size=20, color='black', alpha=0.5)
ax.set_ylabel('NUMBER SURVIVED', size=20, color='black', alpha=0.5)
ax.set_xlabel('FAMILY SIZE', size=20, color='black', alpha=0.5) | code |
2004802/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
plt.figure(figsize=(15,8))
sns.set_style("whitegrid")
ax=sns.countplot(x="Title", data=full_set)
ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5)
ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5)
ax.set_title("COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION",size = 20,color="black",alpha=0.5)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
plt.figure(figsize=(15, 8))
sns.set_style('whitegrid')
ax = sns.countplot(x='Title', data=full_set)
ax.set_ylabel('COUNT', size=20, color='black', alpha=0.5)
ax.set_xlabel('TITLE', size=20, color='black', alpha=0.5)
ax.set_title('COUNT OF TITLES IN EACH CATEGORY AFTER COMBINATION', size=20, color='black', alpha=0.5) | code |
2004802/cell_29 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
print(full_set.Title.value_counts()) | code |
2004802/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
""" 1 ---Family Size =1
2 ---Family Size between 2 and 4(included)
3 ---Family Size more than 4"""
family_size = []
for row in full_set.FamilyMembers:
if row in [1]:
family_size.append(1)
elif row in [2, 3, 4]:
family_size.append(2)
else:
family_size.append(3)
full_set['FamilySize'] = family_size
full_set[full_set['Embarked'].isnull()] | code |
2004802/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
print('\n\nInformation about Null/ empty data points in each Column of Test set\n\n')
print(test_full_set.info()) | code |
2004802/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
print(corr)
plt.figure(figsize=(20, 20))
plt.imshow(corr, cmap='GnBu')
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
plt.suptitle('Correlation Matrix', fontsize=15, fontweight='bold')
plt.show() | code |
2004802/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
print(full_set.head(5)) | code |
2004802/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
print(full_set.isnull().sum()) | code |
2004802/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
print(full_set.Title.value_counts()) | code |
2004802/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
""" 1 ---Family Size =1
2 ---Family Size between 2 and 4(included)
3 ---Family Size more than 4"""
family_size = []
for row in full_set.FamilyMembers:
if row in [1]:
family_size.append(1)
elif row in [2, 3, 4]:
family_size.append(2)
else:
family_size.append(3)
full_set['FamilySize'] = family_size
print('\n\n Number of null in each column before imputing:\n')
print(full_set.isnull().sum()) | code |
2004802/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
print('\n\nInformation about Null/ empty data points in each Column of Training set\n\n')
print(train_full_set.info()) | code |
128021214/cell_21 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
new_data = pd.DataFrame([[5.1, 3.5, 1.4, 0.2], [6.2, 2.8, 4.8, 1.8], [7.3, 3.0, 6.3, 2.5]])
new_data.columns = X.columns
predictions = model.predict(new_data.values)
print(le.inverse_transform(predictions)) | code |
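One common source of the stderr output recorded for this cell is predicting on `new_data.values` (a bare array) after the pipeline was fitted on a DataFrame with column names. Passing the DataFrame itself, whose columns were already aligned with `X`, avoids that warning:

```python
# Same prediction without dropping to .values; new_data.columns already match X.
predictions = model.predict(new_data)
print(le.inverse_transform(predictions))
```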
128021214/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
display(df_iris.head(3))
display(df_iris.tail(3))
display(df_iris.describe()) | code |
128021214/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
new_data = pd.DataFrame([[5.1, 3.5, 1.4, 0.2], [6.2, 2.8, 4.8, 1.8], [7.3, 3.0, 6.3, 2.5]])
new_data.columns = X.columns
display(new_data) | code |
128021214/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size() | code |
128021214/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
from pandas.plotting import andrews_curves
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns | code |
128021214/cell_11 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.figure()
sns.pairplot(df_iris.drop('Id', axis=1), hue='Species', height=3, markers=['o', 's', 'D'])
plt.show() | code |
128021214/cell_18 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_pred, y_test, target_names=list(le.classes_))) | code |
128021214/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
display(X.head(3), y.head(3))
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
display(X_train.describe(), y_test.describe()) | code |
128021214/cell_15 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train) | code |
128021214/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
plt.figure(figsize=(15, 10))
andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.title('Andrews Curves Plot', fontsize=20, fontweight='bold')
plt.legend(loc=1, prop={'size': 15}, frameon=True, facecolor='white', edgecolor='black')
plt.show() | code |
128021214/cell_12 | [
"text_plain_output_1.png"
] | from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.figure()
df_iris.drop('Id', axis=1).boxplot(by='Species', figsize=(15, 10))
plt.show() | code |
2010993/cell_42 | [
"text_plain_output_1.png"
] | from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
gnb_predict = gnb.predict(x_test)
gnb_predict_prob = gnb.predict_proba(x_test)
fpr, tpr, thresholds = roc_curve(y_test, gnb_predict_prob[:, 1])
gnb_auc = auc(fpr, tpr)
print(gnb_auc) | code |
2010993/cell_13 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
y.shape | code |
2010993/cell_25 | [
"image_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
x_new.shape | code |
2010993/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train, y_train)
lr_predict = lr.predict(x_test)
lr_conf_matrix = confusion_matrix(y_test, lr_predict)
lr_accuracy = accuracy_score(y_test, lr_predict)
print(lr_conf_matrix)
print(lr_accuracy) | code |
2010993/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2)
k_means.fit_predict(x_new) | code |
2010993/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape | code |
2010993/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
gnb_predict = gnb.predict(x_test)
gnb_predict_prob = gnb.predict_proba(x_test)
print(gnb_predict)
print(gnb_predict_prob) | code |
2010993/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split | code |
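The later 2010993 cells fit models on `x_train`, `y_train` and evaluate on `x_test`, `y_test`, but the cell that actually performs the split is not among those shown. A plausible sketch (the ratio and seed are guesses; `x_new` is the 17-component PCA projection built in cell_25):

```python
# Hypothetical split assumed by the modelling cells; test_size and random_state are guesses.
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_new, y, test_size=0.3, random_state=0)
```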
2010993/cell_48 | [
"text_plain_output_1.png"
] | from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=10)
dt.fit(x_train, y_train)
dt_predict = dt.predict(x_test)
dt_predict_prob = dt.predict_proba(x_test)
dt_conf_matrix = confusion_matrix(y_test, dt_predict)
dt_accuracy_score = accuracy_score(y_test, dt_predict)
print(dt_conf_matrix)
print(dt_accuracy_score) | code |
2010993/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
gnb_predict = gnb.predict(x_test)
gnb_predict_prob = gnb.predict_proba(x_test)
gnb_conf_matrix = confusion_matrix(y_test, gnb_predict)
gnb_accuracy_score = accuracy_score(y_test, gnb_predict)
print(gnb_conf_matrix)
print(gnb_accuracy_score) | code |
2010993/cell_54 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=10)
rf.fit(x_train, y_train)
rf_predict = rf.predict(x_test)
rf_predict_prob = rf.predict_proba(x_test)
fpr, tpr, thresholds = roc_curve(y_test, rf_predict_prob[:, 1])
rf_auc = auc(fpr, tpr)
print(rf_auc) | code |
2010993/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(x)
plt.figure(figsize=(16, 11))
plt.plot(np.cumsum(pca.explained_variance_ratio_), 'ro-')
plt.grid() | code |
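The cumulative explained-variance curve above is what motivates `n_components=17` in the later cells. scikit-learn can also pick the component count from a variance target directly, which is a compact alternative to reading the plot:

```python
# A float n_components is interpreted as the fraction of variance to retain.
from sklearn.decomposition import PCA

pca_95 = PCA(n_components=0.95)
x_95 = pca_95.fit_transform(x)   # x is the scaled feature matrix from the cell above
print(pca_95.n_components_)      # number of components actually kept
```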
2010993/cell_50 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_curve, auc
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2)
k_means.fit_predict(x_new)
colors = ['r', 'g']
from sklearn.metrics import auc
lr_auc = auc(fpr, tpr)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
gnb_predict = gnb.predict(x_test)
gnb_predict_prob = gnb.predict_proba(x_test)
fpr, tpr, thresholds = roc_curve(y_test, gnb_predict_prob[:, 1])
gnb_auc = auc(fpr, tpr)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=10)
dt.fit(x_train, y_train)
dt_predict = dt.predict(x_test)
dt_predict_prob = dt.predict_proba(x_test)
from sklearn.metrics import roc_curve, auc
fpr, tpr, thresholds = roc_curve(y_test, dt_predict_prob[:, 1])
dt_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 9))
plt.plot(fpr, tpr, label='AUC %0.2f' % dt_auc)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False Positive rate')
plt.ylabel('True Positive rate')
plt.legend()
plt.grid() | code |
2010993/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
data.head() | code |
2010993/cell_49 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_curve, auc
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=10)
dt.fit(x_train, y_train)
dt_predict = dt.predict(x_test)
dt_predict_prob = dt.predict_proba(x_test)
from sklearn.metrics import roc_curve, auc
fpr, tpr, thresholds = roc_curve(y_test, dt_predict_prob[:, 1])
dt_auc = auc(fpr, tpr)
print(dt_auc) | code |
2010993/cell_28 | [
"image_output_1.png"
] | print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape) | code |
2010993/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
y.shape
y.head | code |
2010993/cell_3 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2010993/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
print(x) | code |
2010993/cell_43 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2)
k_means.fit_predict(x_new)
colors = ['r', 'g']
from sklearn.metrics import auc
lr_auc = auc(fpr, tpr)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
gnb_predict = gnb.predict(x_test)
gnb_predict_prob = gnb.predict_proba(x_test)
fpr, tpr, thresholds = roc_curve(y_test, gnb_predict_prob[:, 1])
gnb_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 9))
plt.plot(fpr, tpr, label='AUC %0.2f' % gnb_auc)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.legend() | code |
2010993/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train, y_train)
lr_predict = lr.predict(x_test)
lr_predict_prob = lr.predict_proba(x_test)
print(lr_predict)
print(lr_predict_prob[:, 1]) | code |
2010993/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2)
k_means.fit_predict(x_new)
colors = ['r', 'g']
for i in range(len(x_new)):
plt.scatter(x_new[i][0], x_new[i][1], c=colors[k_means.labels_[i]], s=10)
plt.show() | code |
2010993/cell_14 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head | code |
2010993/cell_53 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=10)
rf.fit(x_train, y_train)
rf_predict = rf.predict(x_test)
rf_predict_prob = rf.predict_proba(x_test)
rf_conf_matrix = confusion_matrix(y_test, rf_predict)
rf_accuracy_score = accuracy_score(y_test, rf_predict)
print(rf_conf_matrix)
print(rf_accuracy_score) | code |
2010993/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
data.head() | code |
2010993/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import auc
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape
x.head
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(x)
new_pca = PCA(n_components=17)
x_new = new_pca.fit_transform(x)
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2)
k_means.fit_predict(x_new)
colors = ['r', 'g']
from sklearn.metrics import auc
lr_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 9))
plt.plot(fpr, tpr, label='AUC= %0.2f' % lr_auc)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.legend() | code |
2010993/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
data.shape
from sklearn.preprocessing import LabelEncoder
lbl = LabelEncoder()
for col in data.columns:
data[col] = lbl.fit_transform(data[col])
y = data['class']
x = data.iloc[:, 1:23]
x.shape | code |
2010993/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.metrics import auc
from sklearn.metrics import auc
lr_auc = auc(fpr, tpr)
print(lr_auc) | code |
333589/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys() | code |
333589/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
print(len(set(fanboy_handles)), len(set(about_handles))) | code |
333589/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
from matplotlib import *
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
333589/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
print(len(set(fanboy_data['username'])), len(set(about_data['username']))) | code |
333589/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib
import matplotlib.pyplot   # needed for the matplotlib.pyplot.subplots() call below
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
fanboy_cc = nx.connected_component_subgraphs(fanboy_graph)
bet_cen = nx.betweenness_centrality([i for i in fanboy_cc][0])
fanboy_cc = nx.connected_component_subgraphs(fanboy_graph)
clo_cen = nx.closeness_centrality([i for i in fanboy_cc][0])
fig, ax = matplotlib.pyplot.subplots()
ax.scatter(list(clo_cen.values()), list(bet_cen.values()))
ax.set_ylim(0.05, 0.35)
for i, txt in enumerate(list(clo_cen.keys())):
ax.annotate(txt, (list(clo_cen.values())[i], list(bet_cen.values())[i])) | code |
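`nx.connected_component_subgraphs` was removed in networkx 2.4, and `[i for i in fanboy_cc][0]` takes whichever component comes first. On current networkx the same centralities are usually computed on the largest component, roughly:

```python
# Modern equivalent (networkx >= 2.4); explicitly selects the largest component.
largest = max(nx.connected_components(fanboy_graph), key=len)
component = fanboy_graph.subgraph(largest)
bet_cen = nx.betweenness_centrality(component)
clo_cen = nx.closeness_centrality(component)
```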
333589/cell_12 | [
"text_plain_output_1.png"
] | import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
print(1 / (float(fanboy_graph.order()) / float(fanboy_graph.size())))
print(1 / (float(about_graph.order()) / float(about_graph.size()))) | code |
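The printed quantity `1 / (order / size)` reduces to `size / order`, i.e. edges per node, which is half the average degree rather than the graph density. The same figures can be read off directly:

```python
# Edges per node (what the cell above prints), average degree, and density.
print(fanboy_graph.number_of_edges() / fanboy_graph.number_of_nodes())
print(2 * fanboy_graph.number_of_edges() / fanboy_graph.number_of_nodes())   # average degree
print(nx.density(fanboy_graph))   # edges as a fraction of all possible edges
```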
327075/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/Indicators.csv')
Indicator_array = df[['IndicatorName', 'IndicatorCode']].drop_duplicates().values
modified_indicators = []
unique_indicator_codes = []
for ele in Indicator_array:
indicator = ele[0]
indicator_code = ele[1].strip()
if indicator_code not in unique_indicator_codes:
new_indicator = re.sub('[,()]', '', indicator).lower()
new_indicator = re.sub('-', ' to ', new_indicator).lower()
modified_indicators.append([new_indicator, indicator_code])
unique_indicator_codes.append(indicator_code)
Indicators = pd.DataFrame(modified_indicators, columns=['IndicatorName', 'IndicatorCode'])
Indicators = Indicators.drop_duplicates()
print(Indicators.shape) | code |
327075/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/Indicators.csv')
Indicator_array = df[['IndicatorName', 'IndicatorCode']].drop_duplicates().values
modified_indicators = []
unique_indicator_codes = []
for ele in Indicator_array:
indicator = ele[0]
indicator_code = ele[1].strip()
if indicator_code not in unique_indicator_codes:
new_indicator = re.sub('[,()]', '', indicator).lower()
new_indicator = re.sub('-', ' to ', new_indicator).lower()
modified_indicators.append([new_indicator, indicator_code])
unique_indicator_codes.append(indicator_code)
Indicators = pd.DataFrame(modified_indicators, columns=['IndicatorName', 'IndicatorCode'])
Indicators = Indicators.drop_duplicates()
key_word_dict = {}
key_word_dict['Demography'] = ['population', 'birth', 'death', 'fertility', 'mortality', 'expectancy']
key_word_dict['Food'] = ['food', 'grain', 'nutrition', 'calories']
key_word_dict['Trade'] = ['trade', 'import', 'export', 'good', 'shipping', 'shipment']
key_word_dict['Health'] = ['health', 'desease', 'hospital', 'mortality', 'doctor']
key_word_dict['Economy'] = ['income', 'gdp', 'gni', 'deficit', 'budget', 'market', 'stock', 'bond', 'infrastructure']
key_word_dict['Energy'] = ['fuel', 'energy', 'power', 'emission', 'electric', 'electricity']
key_word_dict['Education'] = ['education', 'literacy']
key_word_dict['Employment'] = ['employed', 'employment', 'umemployed', 'unemployment']
key_word_dict['Rural'] = ['rural', 'village']
key_word_dict['Urban'] = ['urban', 'city']
feature = 'Health'
for indicator_ele in Indicators.values:
for ele in key_word_dict[feature]:
word_list = indicator_ele[0].split()
if ele in word_list or ele + 's' in word_list:
print(indicator_ele)
break | code |
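The nested loops above scan every indicator name for each keyword, accepting the keyword or its simple plural. The same filter can be written as one vectorised `str.contains` call; the regex below is an illustration, not the notebook's code:

```python
# Vectorised keyword filter over IndicatorName (word boundaries, optional plural 's').
pattern = r'\b(?:' + '|'.join(re.escape(k) + 's?' for k in key_word_dict[feature]) + r')\b'
matches = Indicators[Indicators['IndicatorName'].str.contains(pattern, regex=True)]
print(matches.shape)
```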
104115416/cell_9 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import os
DATA_DIR = '../input/catsvsdogstest/cats-vs-dogs-1000/dogs_cats_sample_1000/dogs_cats_sample_1000/'
train_dir = os.path.join(DATA_DIR, 'train')
valid_dir = os.path.join(DATA_DIR, 'valid')
test_dir = os.path.join(DATA_DIR, 'test')
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(valid_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch.shape)
break
hist = model.fit_generator(train_generator, steps_per_epoch=100, epochs=10, validation_data=valid_generator, validation_steps=50)
model.save('cats_and_dogs_test') | code |
104115416/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import os
print(os.listdir('../input/catsvsdogstest')) | code |
104115416/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import os
DATA_DIR = '../input/catsvsdogstest/cats-vs-dogs-1000/dogs_cats_sample_1000/dogs_cats_sample_1000/'
train_dir = os.path.join(DATA_DIR, 'train')
valid_dir = os.path.join(DATA_DIR, 'valid')
test_dir = os.path.join(DATA_DIR, 'test')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
valid_cats_dir = os.path.join(valid_dir, 'cats')
valid_dogs_dir = os.path.join(valid_dir, 'dogs')
print('total train cat image:', len(os.listdir(train_cats_dir)))
print('total train dog image:', len(os.listdir(train_dogs_dir)))
print('total validation cat image:', len(os.listdir(valid_cats_dir)))
print('total validation dog image:', len(os.listdir(valid_dogs_dir))) | code |
104115416/cell_8 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
from keras.layers import Dense, Dropout
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) | code |
104115416/cell_10 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import os
DATA_DIR = '../input/catsvsdogstest/cats-vs-dogs-1000/dogs_cats_sample_1000/dogs_cats_sample_1000/'
train_dir = os.path.join(DATA_DIR, 'train')
valid_dir = os.path.join(DATA_DIR, 'valid')
test_dir = os.path.join(DATA_DIR, 'test')
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(valid_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
for data_batch, labels_batch in train_generator:
break
hist = model.fit_generator(train_generator, steps_per_epoch=100, epochs=10, validation_data=valid_generator, validation_steps=50)
model.save('cats_and_dogs_test')
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='training acc')
plt.plot(epochs, val_acc, 'b', label='valid acc')
plt.title('Training & valid accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='training loss')
plt.plot(epochs, val_loss, 'b', label='valid loss')
plt.legend()
plt.figure()
plt.show() | code |
104115416/cell_12 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
DATA_DIR = '../input/catsvsdogstest/cats-vs-dogs-1000/dogs_cats_sample_1000/dogs_cats_sample_1000/'
train_dir = os.path.join(DATA_DIR, 'train')
valid_dir = os.path.join(DATA_DIR, 'valid')
test_dir = os.path.join(DATA_DIR, 'test')
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(valid_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
for data_batch, labels_batch in train_generator:
break
hist = model.fit_generator(train_generator, steps_per_epoch=100, epochs=10, validation_data=valid_generator, validation_steps=50)
model.save('cats_and_dogs_test')
imagename = '../input/test-picture/training_picture/21.jpg'
test_image = image.load_img(imagename, target_size=(150, 150))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = model.predict(test_image)
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat'
print(imagename)
print(prediction) | code |
16144426/cell_13 | [
"text_plain_output_1.png"
] | from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape | code |
16144426/cell_9 | [
"text_plain_output_1.png"
] | import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
try:
print(word, embeddings[word][:5])
temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
except:
print(word, 'is not there') | code |
16144426/cell_25 | [
"text_html_output_1.png"
] | from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape
docs_vectors = pd.DataFrame()
for doc in docs_clean:
words = nltk.word_tokenize(doc)
temp = pd.DataFrame()
for word in words:
try:
word_vec = embeddings[word]
temp = temp.append(pd.Series(word_vec), ignore_index=True)
except:
pass
docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
X = docs_vectors.drop([64, 590])
Y = data['sentiment'].drop([64, 590])
url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
Y = data['Sentiment_Manual']
Y.head() | code |
16144426/cell_4 | [
"text_plain_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5]) | code |
16144426/cell_34 | [
"text_plain_output_1.png"
] | from nltk.stem import PorterStemmer
from nltk.stem import PorterStemmer
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import accuracy_score
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape
docs_vectors = pd.DataFrame()
for doc in docs_clean:
words = nltk.word_tokenize(doc)
temp = pd.DataFrame()
for word in words:
try:
word_vec = embeddings[word]
temp = temp.append(pd.Series(word_vec), ignore_index=True)
except:
pass
docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
X = docs_vectors.drop([64, 590])
Y = data['sentiment'].drop([64, 590])
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import accuracy_score
model = RandomForestClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
test_pred = model.predict(xtest)
accuracy_score(ytest, test_pred)
model = AdaBoostClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
test_pred = model.predict(xtest)
accuracy_score(ytest, test_pred)
url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
Y = data['Sentiment_Manual']
Y.value_counts()
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [stemmer.stem(word) for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
X = docs_clean
(X.shape, Y.shape)
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(min_df=5)
cv.fit(X)
XTRAIN = cv.transform(xtrain)
XTEST = cv.transform(xtest)
XTRAIN = XTRAIN.toarray()
XTEST = XTEST.toarray()
from sklearn.tree import DecisionTreeClassifier as dtc
from sklearn.metrics import accuracy_score
model = dtc(max_depth=10)
model.fit(XTRAIN, ytrain)
yp = model.predict(XTEST)
accuracy_score(ytest, yp) | code |
16144426/cell_23 | [
"text_plain_output_1.png"
] | from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape
docs_vectors = pd.DataFrame()
for doc in docs_clean:
words = nltk.word_tokenize(doc)
temp = pd.DataFrame()
for word in words:
try:
word_vec = embeddings[word]
temp = temp.append(pd.Series(word_vec), ignore_index=True)
except:
pass
docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
data.head() | code |
16144426/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import accuracy_score
model = RandomForestClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
test_pred = model.predict(xtest)
accuracy_score(ytest, test_pred)
model = AdaBoostClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
test_pred = model.predict(xtest)
accuracy_score(ytest, test_pred) | code |
16144426/cell_6 | [
"text_plain_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
data.head() | code |
16144426/cell_29 | [
"text_plain_output_1.png"
] | from nltk.stem import PorterStemmer
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape
docs_vectors = pd.DataFrame()
for doc in docs_clean:
words = nltk.word_tokenize(doc)
temp = pd.DataFrame()
for word in words:
try:
word_vec = embeddings[word]
temp = temp.append(pd.Series(word_vec), ignore_index=True)
except:
pass
docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
X = docs_vectors.drop([64, 590])
Y = data['sentiment'].drop([64, 590])
url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
Y = data['Sentiment_Manual']
Y.value_counts()
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [stemmer.stem(word) for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
X = docs_clean
(X.shape, Y.shape) | code |
16144426/cell_26 | [
"text_plain_output_1.png"
] | from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass
docs = docs.str.lower().str.replace('[^a-z ]', '')
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')
def clean_doc(doc):
words = doc.split(' ')
words_clean = [word for word in words if word not in stopwords]
doc_clean = ' '.join(words_clean)
return doc_clean
docs_clean = docs.apply(clean_doc)
docs_clean.shape
docs_vectors = pd.DataFrame()
for doc in docs_clean:
words = nltk.word_tokenize(doc)
temp = pd.DataFrame()
for word in words:
try:
word_vec = embeddings[word]
temp = temp.append(pd.Series(word_vec), ignore_index=True)
except:
pass
docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
X = docs_vectors.drop([64, 590])
Y = data['sentiment'].drop([64, 590])
url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
Y = data['Sentiment_Manual']
Y.value_counts() | code |
16144426/cell_2 | [
"text_html_output_1.png"
] | import gensim
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True) | code |
16144426/cell_11 | [
"text_plain_output_1.png"
] | import gensim
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
docs = docs.str.lower().str.replace('[^a-z ]', '')
docs.head() | code |