Dataset schema (one record per notebook cell; the records below list their fields in this order):

    path               string     length 13 to 17
    screenshot_names   sequence   length 1 to 873
    code               string     length 0 to 40.4k
    cell_type          string     1 distinct value ("code")
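Each record pairs a notebook cell's source code with the names of the output screenshots it produced. A minimal sketch of iterating over the records with the Hugging Face `datasets` library is shown below; the dataset identifier "user/notebook-cells" is a placeholder assumption, not the actual dataset ID.

    # A minimal sketch, assuming these records are published as a Hugging Face dataset.
    # "user/notebook-cells" is a placeholder identifier, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("user/notebook-cells", split="train")
    for row in ds.select(range(3)):
        # Each row exposes the four fields described in the schema above.
        print(row["path"], row["cell_type"], row["screenshot_names"])
        print(row["code"][:200])  # first 200 characters of the cell's source code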
121154806/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os
import segmentation_models as sm
import tensorflow as tf

sm.set_framework('tf.keras')
sm.framework()
root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=np.bool)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)

BATCH_SIZE = 5
EPOCHS = 400
N_CLASS = 1
ACTIVATION = 'sigmoid'
CALLBACKS = [tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss')]

def Model_Training(model_list, batch_size, epochs, callbacks):
    model_dict = {}
    for key, dict_i in model_list.items():
        if dict_i['Train'] == True:
            model = dict_i['model']
            model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy', sm.losses.bce_jaccard_loss, sm.metrics.iou_score])
            model.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25, verbose=2, callbacks=callbacks)
            model_dict[key] = model
    return model_dict

model_list = {'unet-efficientnetb0': {'model': sm.Unet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}, 'linknet-efficientnetb0': {'model': sm.Linknet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}}
model_dict = Model_Training(model_list, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=CALLBACKS)
code
121154806/cell_1
[ "text_plain_output_1.png" ]
!pip install -U segmentation-models
!pip install gif2numpy
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import segmentation_models as sm
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
import gif2numpy
code
121154806/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os
import segmentation_models as sm
import tensorflow as tf

sm.set_framework('tf.keras')
sm.framework()
root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=np.bool)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)

BATCH_SIZE = 5
EPOCHS = 400
N_CLASS = 1
ACTIVATION = 'sigmoid'
CALLBACKS = [tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss')]

def Model_Training(model_list, batch_size, epochs, callbacks):
    model_dict = {}
    for key, dict_i in model_list.items():
        if dict_i['Train'] == True:
            model = dict_i['model']
            model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy', sm.losses.bce_jaccard_loss, sm.metrics.iou_score])
            model.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_split=0.25, verbose=2, callbacks=callbacks)
            model_dict[key] = model
    return model_dict

model_list = {'unet-efficientnetb0': {'model': sm.Unet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}, 'linknet-efficientnetb0': {'model': sm.Linknet('efficientnetb0', classes=N_CLASS, activation=ACTIVATION), 'Train': True}}
code
121154806/cell_8
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import os

root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

input_data_drive_train = os.path.join(root, 'training/images')
target_data_drive_train = os.path.join(root, 'training/1st_manual')
images_drive_train, masks_drive_train = Data_sorting(input_data_drive_train, target_data_drive_train, exts)
input_data_drive_test = os.path.join(root, 'test/images')
target_data_drive_test = os.path.join(root, 'test/mask')
images_drive_test, masks_drive_test = Data_sorting(input_data_drive_test, target_data_drive_test, exts)
code
121154806/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os

root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=np.bool)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)

def visualize(**images):
    """PLot images in one row."""
    n = len(images)
    for i, (name, image) in enumerate(images.items()):
        plt.xticks([])
        plt.yticks([])

for img, msk in zip(X_train[:6], y_train[:6]):
    visualize(image=img, gt_mask=np.squeeze(msk))
code
121154806/cell_12
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import cv2
import gif2numpy
import matplotlib.pyplot as plt
import numpy as np
import os

root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/'
exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm')

def Data_sorting(input_data, target_data, exts):
    images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    masks = sorted([os.path.join(target_data, fname) for fname in os.listdir(target_data) if fname.endswith(exts) and (not fname.startswith('.'))])
    return (images, masks)

def Create_Dataset(folder_path, is_mask, img_height, img_width, img_channels):
    length = len(folder_path)
    X = np.zeros((length, img_height, img_width, img_channels), dtype=np.uint8)
    y = np.zeros((length, img_height, img_width, 1), dtype=np.bool)
    if not is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (img_height, img_width))
            X[id] = img
        return X
    if is_mask:
        for id, fname in tqdm(enumerate(folder_path)):
            if fname.endswith('gif'):
                img = gif2numpy.convert(fname)[0][0]
            else:
                img = cv2.imread(fname)
            img = cv2.resize(img, (img_height, img_width))
            img = img[:, :, 0]
            img = np.expand_dims(img, axis=-1)
            y[id] = img
        return y

IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
X_train = Create_Dataset(folder_path=images_drive_train, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
X_test = Create_Dataset(folder_path=images_drive_test, is_mask=False, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=IMG_CHANNELS)
y_train = Create_Dataset(folder_path=masks_drive_train, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)
y_test = Create_Dataset(folder_path=masks_drive_test, is_mask=True, img_height=IMG_HEIGHT, img_width=IMG_WIDTH, img_channels=1)

plt.imshow(X_test[5])
code
90124098/cell_9
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
x = df.iloc[:, [3, 4]]

from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(x)
    wcss.append(km.inertia_)

x = np.array(x)
km = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)
y_means = km.fit_predict(x)
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s=100, c='pink', label='Cluster 1')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s=100, c='yellow', label='Cluster 2')
plt.scatter(x[y_means == 2, 0], x[y_means == 2, 1], s=100, c='cyan', label='Cluster 3')
plt.scatter(x[y_means == 3, 0], x[y_means == 3, 1], s=100, c='magenta', label='Cluster 4')
plt.scatter(x[y_means == 4, 0], x[y_means == 4, 1], s=100, c='orange', label='Cluster 5')
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=50, c='blue', label='Centeroid')
plt.style.use('fivethirtyeight')
plt.title('K Means Clustering', fontsize=20)
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.grid()
plt.show()
code
90124098/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.info()
code
90124098/cell_8
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
x = df.iloc[:, [3, 4]]

from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(x)
    wcss.append(km.inertia_)

plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method', fontsize=20)
plt.xlabel('No. of Clusters')
plt.ylabel('wcss')
plt.show()
code
90124098/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.head()
code
90124098/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
x = df.iloc[:, [3, 4]]
x.head()
code
1006487/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

biology = pd.read_csv('../input/biology.csv')
cooking = pd.read_csv('../input/cooking.csv')
crypto = pd.read_csv('../input/crypto.csv')
diy = pd.read_csv('../input/diy.csv')
robotics = pd.read_csv('../input/robotics.csv')
travel = pd.read_csv('../input/travel.csv')
test = pd.read_csv('../input/test.csv')

def strip_punctuation(s):
    return ''.join((c for c in s if c not in punctuation))

def remove_html(s):
    soup = BeautifulSoup(s, 'html.parser')
    content = soup.get_text()
    return content

def text_transform(dataframe):
    dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x)))
    dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x))))

def load_data(name):
    utl = '../input/' + name + '.csv'
    files = pd.read_csv(utl)
    text_transform(files)
    files['category'] = name
    return files

def merge_data(list_of_files):
    list_of_dataframe = [''] * len(list_of_files)
    for i in range(0, len(list_of_files)):
        list_of_dataframe[i] = load_data(list_of_files[i])
    data = pd.concat(list_of_dataframe, axis=0, ignore_index=True)
    return data

data[data['category'] == 'robotics']['tags'][0:20]
code
1006487/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_all['what']
code
1006487/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_robotics = freqDist(robotics) relative_Freq = relativeFreq(Fdist_robotics, Fdist_all) relative_Freq[0:100]
code
1006487/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

biology = pd.read_csv('../input/biology.csv')
cooking = pd.read_csv('../input/cooking.csv')
crypto = pd.read_csv('../input/crypto.csv')
diy = pd.read_csv('../input/diy.csv')
robotics = pd.read_csv('../input/robotics.csv')
travel = pd.read_csv('../input/travel.csv')
test = pd.read_csv('../input/test.csv')

def strip_punctuation(s):
    return ''.join((c for c in s if c not in punctuation))

def remove_html(s):
    soup = BeautifulSoup(s, 'html.parser')
    content = soup.get_text()
    return content

def text_transform(dataframe):
    dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x)))
    dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x))))

def load_data(name):
    utl = '../input/' + name + '.csv'
    files = pd.read_csv(utl)
    text_transform(files)
    files['category'] = name
    return files

def merge_data(list_of_files):
    list_of_dataframe = [''] * len(list_of_files)
    for i in range(0, len(list_of_files)):
        list_of_dataframe[i] = load_data(list_of_files[i])
    data = pd.concat(list_of_dataframe, axis=0, ignore_index=True)
    return data

data[0:5]
code
1006487/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords
from subprocess import check_output
import numpy as np
import pandas as pd
import nltk
import re
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from string import punctuation

stop = set(stopwords.words('english'))
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1006487/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) print(float(sum(teaser)) / len(teaser)) sns.distplot(teaser)
code
1006487/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_robotics = freqDist(robotics) Fdist_robotics['while1']
code
1006487/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_all['while1']
code
1006487/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation
import nltk  # natural language processing
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

biology = pd.read_csv('../input/biology.csv')
cooking = pd.read_csv('../input/cooking.csv')
crypto = pd.read_csv('../input/crypto.csv')
diy = pd.read_csv('../input/diy.csv')
robotics = pd.read_csv('../input/robotics.csv')
travel = pd.read_csv('../input/travel.csv')
test = pd.read_csv('../input/test.csv')

def strip_punctuation(s):
    return ''.join((c for c in s if c not in punctuation))

def remove_html(s):
    soup = BeautifulSoup(s, 'html.parser')
    content = soup.get_text()
    return content

def text_transform(dataframe):
    dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x)))
    dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x))))

def load_data(name):
    utl = '../input/' + name + '.csv'
    files = pd.read_csv(utl)
    text_transform(files)
    files['category'] = name
    return files

def merge_data(list_of_files):
    list_of_dataframe = [''] * len(list_of_files)
    for i in range(0, len(list_of_files)):
        list_of_dataframe[i] = load_data(list_of_files[i])
    data = pd.concat(list_of_dataframe, axis=0, ignore_index=True)
    return data

def list_to_str(lists):
    strs = ''
    for content in lists:
        strs += content
    return strs

def to_plain_text(dataframe):
    text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist())
    return text

def to_nltk_text(dataframe):
    text = to_plain_text(dataframe)
    return nltk.Text(nltk.word_tokenize(text))

all_text = to_nltk_text(data)
robotics = to_nltk_text(data[data['category'] == 'biology'])
code
1006487/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_robotics = freqDist(robotics) Fdist_robotics['rna'] / Fdist_all['rna']
code
1006487/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_all['rna']
code
1006487/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) Fdist_robotics = freqDist(robotics) relative_Freq = relativeFreq(Fdist_robotics, Fdist_all)
code
1006487/cell_10
[ "text_plain_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text)
code
1006487/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics.csv') travel = pd.read_csv('../input/travel.csv') test = pd.read_csv('../input/test.csv') def strip_punctuation(s): return ''.join((c for c in s if c not in punctuation)) def remove_html(s): soup = BeautifulSoup(s, 'html.parser') content = soup.get_text() return content def text_transform(dataframe): dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x))) dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x)))) def load_data(name): utl = '../input/' + name + '.csv' files = pd.read_csv(utl) text_transform(files) files['category'] = name return files def merge_data(list_of_files): list_of_dataframe = [''] * len(list_of_files) for i in range(0, len(list_of_files)): list_of_dataframe[i] = load_data(list_of_files[i]) data = pd.concat(list_of_dataframe, axis=0, ignore_index=True) return data def list_to_str(lists): strs = '' for content in lists: strs += content return strs def to_plain_text(dataframe): text = list_to_str(dataframe['all_text'].apply(lambda x: x.replace('\n', ' ')).tolist()) return text def to_nltk_text(dataframe): text = to_plain_text(dataframe) return nltk.Text(nltk.word_tokenize(text)) all_text = to_nltk_text(data) robotics = to_nltk_text(data[data['category'] == 'biology']) def freqDist(text): freqDist = {} for word in text: if word in freqDist: freqDist[word] += 1 else: freqDist[word] = 1 return freqDist def relativeFreq(subset, alls, sort=True, adjusted=True): result = [' '] * len(subset) modifier = 1 for i, key in enumerate(subset.keys()): if adjusted == True: if alls[key] > 100: modifier = 1 else: modifier = 0 tf = float(subset[key]) / alls[key] result[i] = (key, tf * modifier) if sort == True: result.sort(key=lambda tup: tup[1], reverse=True) return result Fdist_all = freqDist(all_text) teaser = list(Fdist_all.values()) type(teaser) len(Fdist_all)
code
1006487/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

biology = pd.read_csv('../input/biology.csv')
cooking = pd.read_csv('../input/cooking.csv')
crypto = pd.read_csv('../input/crypto.csv')
diy = pd.read_csv('../input/diy.csv')
robotics = pd.read_csv('../input/robotics.csv')
travel = pd.read_csv('../input/travel.csv')
test = pd.read_csv('../input/test.csv')

def strip_punctuation(s):
    return ''.join((c for c in s if c not in punctuation))

def remove_html(s):
    soup = BeautifulSoup(s, 'html.parser')
    content = soup.get_text()
    return content

def text_transform(dataframe):
    dataframe['title'] = dataframe['title'].apply(lambda x: strip_punctuation(str.lower(x)))
    dataframe['content'] = dataframe['content'].apply(lambda x: strip_punctuation(str.lower(remove_html(x))))

def load_data(name):
    utl = '../input/' + name + '.csv'
    files = pd.read_csv(utl)
    text_transform(files)
    files['category'] = name
    return files

def merge_data(list_of_files):
    list_of_dataframe = [''] * len(list_of_files)
    for i in range(0, len(list_of_files)):
        list_of_dataframe[i] = load_data(list_of_files[i])
    data = pd.concat(list_of_dataframe, axis=0, ignore_index=True)
    return data

data['all_text'] = data['title'] + ' ' + data['content']
code
74045588/cell_9
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D
from keras.models import Sequential, Model
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.metrics import AUC
import gc
import glob
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train'
generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2)
train_generator = generator.flow_from_directory(train_data_dir, target_size=(224, 224), color_mode='rgb', batch_size=64, class_mode='categorical', shuffle=True)

from tensorflow.keras.applications import EfficientNetB0
efnb0 = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
model = Sequential()
model.add(efnb0)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(4, activation='sigmoid'))
for layer in efnb0.layers:
    layer.trainable = False
model.summary()

from tensorflow.keras.metrics import AUC
auc = AUC()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', auc])
hist = model.fit_generator(train_generator, steps_per_epoch=231, epochs=50)

import glob
from keras.preprocessing.image import load_img, img_to_array
import gc

img_list = glob.glob('/kaggle/input/1056lab-covid19-chest-xray-recognit/test/*.png')
img_list.sort()
y_pred = []
for img_sublist in np.array_split(img_list, 10):
    img_array_list = []
    for path in img_sublist:
        img = load_img(path, color_mode='rgb', target_size=(224, 224, 3))
        img_array = img_to_array(img)
        img_array_list.append(img_array)
    X_test = np.array(img_array_list)
    y_test = model.predict(X_test)[:, 0]
    y_pred = np.concatenate([y_pred, y_test])
    del img_array_list
    del X_test
    gc.collect()

submit_df = pd.read_csv('/kaggle/input/1056lab-covid19-chest-xray-recognit/sampleSubmission.csv', index_col=0)
submit_df['COVID'] = y_pred
submit_df.to_csv('submission.csv')
submit_df
code
74045588/cell_4
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator

train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train'
generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2)
train_generator = generator.flow_from_directory(train_data_dir, target_size=(224, 224), color_mode='rgb', batch_size=64, class_mode='categorical', shuffle=True)
code
74045588/cell_7
[ "text_html_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D
from keras.models import Sequential, Model
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.metrics import AUC

train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train'
generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2)
train_generator = generator.flow_from_directory(train_data_dir, target_size=(224, 224), color_mode='rgb', batch_size=64, class_mode='categorical', shuffle=True)

from tensorflow.keras.applications import EfficientNetB0
efnb0 = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
model = Sequential()
model.add(efnb0)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(4, activation='sigmoid'))
for layer in efnb0.layers:
    layer.trainable = False
model.summary()

from tensorflow.keras.metrics import AUC
auc = AUC()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', auc])
hist = model.fit_generator(train_generator, steps_per_epoch=231, epochs=50)
code
74045588/cell_10
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator

train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train'
generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2)
train_generator = generator.flow_from_directory(train_data_dir, target_size=(224, 224), color_mode='rgb', batch_size=64, class_mode='categorical', shuffle=True)
train_generator.class_indices
code
74045588/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D
from keras.models import Sequential, Model
from tensorflow.keras.applications import EfficientNetB0

from tensorflow.keras.applications import EfficientNetB0
efnb0 = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
model = Sequential()
model.add(efnb0)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(4, activation='sigmoid'))
for layer in efnb0.layers:
    layer.trainable = False
model.summary()
code
104128103/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error, mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def settings():
    plt.style.use('bmh')
    plt.rcParams['figure.figsize'] = [25, 12]
    plt.rcParams['font.size'] = 24
    plt.rcParams['figure.dpi'] = 100
    sns.set()

def ml_error(model_name, ytest, yhat):
    mae = mean_absolute_error(ytest, yhat)
    mape = mean_absolute_percentage_error(ytest, yhat)
    rmse = np.sqrt(mean_squared_error(ytest, yhat))
    return pd.DataFrame({'Model name': model_name, 'MAE': mae, 'MAPE': mape, 'RMSE': rmse}, index=[0])

def analise_bivariada(df, column):
    aux1 = df[[column, 'preco']].groupby(column).mean().reset_index()
    aux2 = df[[column, 'preco']].groupby(column).median().reset_index()

df_raw = pd.read_csv('treino.csv')
df_test = pd.read_csv('teste.csv')
code
17120136/cell_21
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams
from nltk.corpus import stopwords
import pandas as pd

grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
stop = stopwords.words('english')
stop.extend(['rated', 'n', 'nan', 'x'])
review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop])
all_reviews = review_tokens.astype(str).str.cat()
cleaned_reviews = tokenizer.tokenize(all_reviews)
fd = FreqDist()
for word in cleaned_reviews:
    fd[word] += 1
fd.most_common(5)
cuisines = newdata[['cuisines', 'rating']]
cuisines['cuisines'] = cuisines['cuisines'].astype(str)
cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower())
cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize)
all_cuisines = cuisine_tokens.astype(str).str.cat()
cleaned_cuisines = tokenizer.tokenize(all_cuisines)
fd_cuisine = FreqDist()
for cuisine in cleaned_cuisines:
    fd_cuisine[cuisine] += 1
print(fd_cuisine.most_common()[-50:])
code
17120136/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
print(review_tokens[0])
code
17120136/cell_25
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams
from nltk.corpus import stopwords
import pandas as pd

grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
stop = stopwords.words('english')
stop.extend(['rated', 'n', 'nan', 'x'])
review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop])
all_reviews = review_tokens.astype(str).str.cat()
cleaned_reviews = tokenizer.tokenize(all_reviews)
fd = FreqDist()
for word in cleaned_reviews:
    fd[word] += 1
fd.most_common(5)
cuisines = newdata[['cuisines', 'rating']]
cuisines['cuisines'] = cuisines['cuisines'].astype(str)
cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower())
cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize)
all_cuisines = cuisine_tokens.astype(str).str.cat()
cleaned_cuisines = tokenizer.tokenize(all_cuisines)
fd_cuisine = FreqDist()
for cuisine in cleaned_cuisines:
    fd_cuisine[cuisine] += 1
newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']] = newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']].astype('str')
newdata['text'] = newdata['reviews_list'] + ' ' + newdata['menu_item'] + ' ' + newdata['dish_liked'] + ' ' + newdata['cuisines']
text_data = newdata[['text', 'rating']]
text_data['text'] = text_data['text'].apply(lambda x: x.lower())
tokens = text_data['text'].apply(tokenizer.tokenize)
tokens = tokens.apply(lambda x: [token for token in x if token not in stop])
print(tokens[0])
code
17120136/cell_33
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Flatten, Embedding, Conv1D, MaxPooling1D, Dropout
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.regularizers import l1, l2
from nltk import FreqDist, bigrams, trigrams
from nltk import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd

grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
stop = stopwords.words('english')
stop.extend(['rated', 'n', 'nan', 'x'])
review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop])
all_reviews = review_tokens.astype(str).str.cat()
cleaned_reviews = tokenizer.tokenize(all_reviews)
fd = FreqDist()
for word in cleaned_reviews:
    fd[word] += 1
fd.most_common(5)
cuisines = newdata[['cuisines', 'rating']]
cuisines['cuisines'] = cuisines['cuisines'].astype(str)
cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower())
cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize)
all_cuisines = cuisine_tokens.astype(str).str.cat()
cleaned_cuisines = tokenizer.tokenize(all_cuisines)
fd_cuisine = FreqDist()
for cuisine in cleaned_cuisines:
    fd_cuisine[cuisine] += 1
newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']] = newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']].astype('str')
newdata['text'] = newdata['reviews_list'] + ' ' + newdata['menu_item'] + ' ' + newdata['dish_liked'] + ' ' + newdata['cuisines']
text_data = newdata[['text', 'rating']]
text_data['text'] = text_data['text'].apply(lambda x: x.lower())
tokens = text_data['text'].apply(tokenizer.tokenize)
tokens = tokens.apply(lambda x: [token for token in x if token not in stop])
lmtzr = WordNetLemmatizer()

def lem(text):
    return [lmtzr.lemmatize(word) for word in text]

tokens_new = tokens.apply(lem)
le = LabelEncoder()
target = le.fit_transform(text_data['rating'])
X_train, X_test, y_train, y_test = train_test_split(tokens_new, target, test_size=0.3, random_state=0, stratify=target)
t = Tokenizer()
t.fit_on_texts(X_train)
vocab_size = len(t.word_index) + 1
train_sequences = t.texts_to_sequences(X_train)
test_sequences = t.texts_to_sequences(X_test)
train_padded = pad_sequences(train_sequences, maxlen=500, padding='post')
test_padded = pad_sequences(test_sequences, maxlen=500, padding='post')
model = Sequential()
model.add(Embedding(vocab_size, 100, input_length=500))
model.add(Dropout(1))
model.add(Conv1D(32, 3, activation='relu', kernel_regularizer=l1(1e-05)))
model.add(MaxPooling1D(2))
model.add(Dropout(1))
model.add(Flatten())
model.add(Dense(4, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_padded, y_train, epochs=5, batch_size=64, validation_data=(test_padded, y_test))
pred_train = model.predict(train_padded)
pred_train = np.argmax(pred_train, axis=1)
print(classification_report(y_train, pred_train))
code
17120136/cell_20
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) cuisines = newdata[['cuisines', 'rating']] cuisines['cuisines'] = cuisines['cuisines'].astype(str) cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower()) cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize) all_cuisines = cuisine_tokens.astype(str).str.cat() cleaned_cuisines = tokenizer.tokenize(all_cuisines) fd_cuisine = FreqDist() for cuisine in cleaned_cuisines: fd_cuisine[cuisine] += 1 print(fd_cuisine.most_common(50))
code
17120136/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
newdata.describe(include='all')
code
17120136/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import nltk
from nltk.corpus import RegexpTokenizer as regextoken
from nltk.corpus import stopwords
from nltk import FreqDist, bigrams, trigrams
from nltk import WordNetLemmatizer
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Flatten, Embedding, Conv1D, MaxPooling1D, Dropout
from keras.regularizers import l1, l2
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings('ignore')
zomato = pd.read_csv('../input/zomato.csv', na_values=['-', ''])
data = zomato.copy()
code
17120136/cell_11
[ "text_html_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams
from nltk.corpus import stopwords
import pandas as pd
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
stop = stopwords.words('english')
stop.extend(['rated', 'n', 'nan', 'x'])
review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop])
all_reviews = review_tokens.astype(str).str.cat()
cleaned_reviews = tokenizer.tokenize(all_reviews)
fd = FreqDist()
for word in cleaned_reviews:
    fd[word] += 1
fd.most_common(5)
code
17120136/cell_18
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) bigrams = bigrams(cleaned_reviews) fd_bigrams = FreqDist() for bigram in bigrams: fd_bigrams[bigram] += 1 fd_bigrams.most_common(5) trigrams = trigrams(cleaned_reviews) fd_trigrams = FreqDist() for trigram in trigrams: fd_trigrams[trigram] += 1 fd_trigrams.most_common(5) plt.figure(figsize=(10, 5)) fd_trigrams.plot(50) plt.show()
code
17120136/cell_28
[ "text_plain_output_1.png" ]
from keras.preprocessing.text import Tokenizer from nltk import FreqDist, bigrams, trigrams from nltk import WordNetLemmatizer from nltk.corpus import stopwords from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) cuisines = newdata[['cuisines', 'rating']] cuisines['cuisines'] = cuisines['cuisines'].astype(str) cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower()) cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize) all_cuisines = cuisine_tokens.astype(str).str.cat() cleaned_cuisines = tokenizer.tokenize(all_cuisines) fd_cuisine = FreqDist() for cuisine in cleaned_cuisines: fd_cuisine[cuisine] += 1 newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']] = newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']].astype('str') newdata['text'] = newdata['reviews_list'] + ' ' + newdata['menu_item'] + ' ' + newdata['dish_liked'] + ' ' + newdata['cuisines'] text_data = newdata[['text', 'rating']] text_data['text'] = text_data['text'].apply(lambda x: x.lower()) tokens = text_data['text'].apply(tokenizer.tokenize) tokens = tokens.apply(lambda x: [token for token in x if token not in stop]) lmtzr = WordNetLemmatizer() def lem(text): return [lmtzr.lemmatize(word) for word in text] tokens_new = tokens.apply(lem) le = LabelEncoder() target = le.fit_transform(text_data['rating']) X_train, X_test, y_train, y_test = train_test_split(tokens_new, target, test_size=0.3, random_state=0, stratify=target) t = Tokenizer() t.fit_on_texts(X_train) vocab_size = len(t.word_index) + 1 print(vocab_size)
code
17120136/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
code
17120136/cell_15
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) bigrams = bigrams(cleaned_reviews) fd_bigrams = FreqDist() for bigram in bigrams: fd_bigrams[bigram] += 1 fd_bigrams.most_common(5) plt.figure(figsize=(10, 5)) fd_bigrams.plot(50) plt.show()
code
17120136/cell_17
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) trigrams = trigrams(cleaned_reviews) fd_trigrams = FreqDist() for trigram in trigrams: fd_trigrams[trigram] += 1 fd_trigrams.most_common(5)
code
17120136/cell_31
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Flatten, Embedding, Conv1D, MaxPooling1D, Dropout from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.regularizers import l1, l2 from nltk import FreqDist, bigrams, trigrams from nltk import WordNetLemmatizer from nltk.corpus import stopwords from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) cuisines = newdata[['cuisines', 'rating']] cuisines['cuisines'] = cuisines['cuisines'].astype(str) cuisines['cuisines'] = cuisines['cuisines'].apply(lambda x: x.lower()) cuisine_tokens = cuisines['cuisines'].apply(tokenizer.tokenize) all_cuisines = cuisine_tokens.astype(str).str.cat() cleaned_cuisines = tokenizer.tokenize(all_cuisines) fd_cuisine = FreqDist() for cuisine in cleaned_cuisines: fd_cuisine[cuisine] += 1 newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']] = newdata[['reviews_list', 'menu_item', 'dish_liked', 'cuisines']].astype('str') newdata['text'] = newdata['reviews_list'] + ' ' + newdata['menu_item'] + ' ' + newdata['dish_liked'] + ' ' + newdata['cuisines'] text_data = newdata[['text', 'rating']] text_data['text'] = text_data['text'].apply(lambda x: x.lower()) tokens = text_data['text'].apply(tokenizer.tokenize) tokens = tokens.apply(lambda x: [token for token in x if token not in stop]) lmtzr = WordNetLemmatizer() def lem(text): return [lmtzr.lemmatize(word) for word in text] tokens_new = tokens.apply(lem) le = LabelEncoder() target = le.fit_transform(text_data['rating']) X_train, X_test, y_train, y_test = train_test_split(tokens_new, target, test_size=0.3, random_state=0, stratify=target) t = Tokenizer() t.fit_on_texts(X_train) vocab_size = len(t.word_index) + 1 train_sequences = t.texts_to_sequences(X_train) test_sequences = t.texts_to_sequences(X_test) train_padded = pad_sequences(train_sequences, maxlen=500, padding='post') test_padded = pad_sequences(test_sequences, maxlen=500, padding='post') model = Sequential() model.add(Embedding(vocab_size, 100, input_length=500)) model.add(Dropout(1)) model.add(Conv1D(32, 3, activation='relu', kernel_regularizer=l1(1e-05))) model.add(MaxPooling1D(2)) model.add(Dropout(1)) model.add(Flatten()) model.add(Dense(4, 
activation='softmax')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(train_padded, y_train, epochs=5, batch_size=64, validation_data=(test_padded, y_test))
code
17120136/cell_14
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata = newdata.reset_index(drop=True) newdata['rating'] = newdata['rate'].str[:3] newdata = newdata[newdata.rating != 'NEW'] newdata = newdata.dropna(subset=['rating']) newdata['rating'] = pd.to_numeric(newdata['rating']) newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3']) reviews_data = newdata[['reviews_list', 'rating']] reviews_data['reviews_list'][0] reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower()) tokenizer = regextoken('[a-zA-Z]+') review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize) stop = stopwords.words('english') stop.extend(['rated', 'n', 'nan', 'x']) review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop]) all_reviews = review_tokens.astype(str).str.cat() cleaned_reviews = tokenizer.tokenize(all_reviews) fd = FreqDist() for word in cleaned_reviews: fd[word] += 1 fd.most_common(5) bigrams = bigrams(cleaned_reviews) fd_bigrams = FreqDist() for bigram in bigrams: fd_bigrams[bigram] += 1 fd_bigrams.most_common(5)
code
17120136/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords
stop = stopwords.words('english')
print(stop)
code
17120136/cell_12
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams
from nltk.corpus import stopwords
import pandas as pd
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata = newdata.reset_index(drop=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata['rating'] = pd.cut(newdata['rating'], bins=[0, 3.0, 3.5, 4.0, 5.0], labels=['0', '1', '2', '3'])
reviews_data = newdata[['reviews_list', 'rating']]
reviews_data['reviews_list'][0]
reviews_data['reviews_list'] = reviews_data['reviews_list'].apply(lambda x: x.lower())
tokenizer = regextoken('[a-zA-Z]+')
review_tokens = reviews_data['reviews_list'].apply(tokenizer.tokenize)
stop = stopwords.words('english')
stop.extend(['rated', 'n', 'nan', 'x'])
review_tokens = review_tokens.apply(lambda x: [token for token in x if token not in stop])
all_reviews = review_tokens.astype(str).str.cat()
cleaned_reviews = tokenizer.tokenize(all_reviews)
fd = FreqDist()
for word in cleaned_reviews:
    fd[word] += 1
fd.most_common(5)
plt.figure(figsize=(10, 5))
fd.plot(50)
plt.show()
code
2029692/cell_13
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from scipy.stats import skew from scipy.stats import skew import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vmax=.8, square=True); numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object'] numeric.remove('Id') numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars'] for feature in numreicMostCorr: featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1) pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')] for pair in pairs: featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1) categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object'] for category in categorical: data = pd.concat([train[category], train['SalePrice']], axis=1) data[category] = data[category].astype('category') if data[category].isnull().any(): data[category] = data[category].cat.add_categories(['MISSING']) data[category] = data[category].fillna('MISSING') cat_data = pd.concat([data['SalePrice'], data[category]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=category, y="SalePrice", data=cat_data) fig.axis(ymin=0, ymax=800000) plt.show() numeric_missing = train[numreicMostCorr].isnull().sum().sort_values(ascending=False) categorical_missing = train[categorical].isnull().sum().sort_values(ascending=False) numeric_to_delete = numeric_missing[numeric_missing > 438].index categorical_to_delete = categorical_missing[categorical_missing > 438].index def removeFromList(sourceList, filterList): filteredList = list(filter(lambda x: x not in filterList, sourceList)) return filteredList numreicMostCorr = removeFromList(numreicMostCorr, numeric_to_delete) categorical = removeFromList(categorical, categorical_to_delete) numreicMostCorr = removeFromList(numreicMostCorr, ['GarageCars', '1stFlrSF', '2ndFlrSF', 'YearRemodAdd', 'FullBath']) categorical = removeFromList(categorical, ['Alley', 'LotShape', 'LandSlope', 'BldgType', 'Exterior1st', 'Exterior2nd', 'ExterCond', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'GarageFinish', 'GarageType', 'GarageCond', 'Fence']) all_columns = numreicMostCorr + categorical + ['SalePrice'] train = train[all_columns] train['LotFrontage'] = train['LotFrontage'].fillna(train['LotFrontage'].mean()) train[numreicMostCorr].isnull().sum().sort_values(ascending=False) train['BsmtQual'] = train['BsmtQual'].fillna('Missing') train['GarageQual'] = train['GarageQual'].fillna('Missing') train['MasVnrType'] = train['MasVnrType'].fillna('Missing') train = train.drop(train.loc[train['Electrical'].isnull()].index) train[categorical].isnull().sum().sort_values(ascending=False) from scipy.stats import skew skewed_cols = numreicMostCorr + ['SalePrice'] skewed = train[skewed_cols].apply(lambda x: skew(x.dropna())) skewed = skewed[skewed > 0.75] skewed = skewed.index train[skewed] = np.log1p(train[skewed]) for numer in skewed_cols: numerFeature = pd.DataFrame({'unskewed_' + numer: train[numer]}) train = pd.get_dummies(train) 
train.head(10)
code
2029692/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=.8, square=True);
numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object']
numeric.remove('Id')
numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars']
for feature in numreicMostCorr:
    featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1)
pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')]
for pair in pairs:
    featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1)
categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object']
for category in categorical:
    data = pd.concat([train[category], train['SalePrice']], axis=1)
    data[category] = data[category].astype('category')
    if data[category].isnull().any():
        data[category] = data[category].cat.add_categories(['MISSING'])
        data[category] = data[category].fillna('MISSING')
    cat_data = pd.concat([data['SalePrice'], data[category]], axis=1)
    f, ax = plt.subplots(figsize=(8, 6))
    fig = sns.boxplot(x=category, y="SalePrice", data=cat_data)
    fig.axis(ymin=0, ymax=800000)
    plt.show()
numeric_missing = train[numreicMostCorr].isnull().sum().sort_values(ascending=False)
numeric_missing.head(20)
categorical_missing = train[categorical].isnull().sum().sort_values(ascending=False)
categorical_missing.head(20)
code
2029692/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=0.8, square=True)
code
2029692/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=.8, square=True);
numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object']
numeric.remove('Id')
numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars']
for feature in numreicMostCorr:
    featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1)
    featureDF.plot.scatter(x=feature, y='SalePrice', ylim=(0, 800000))
code
2029692/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train['SalePrice'].describe()
code
2029692/cell_11
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vmax=.8, square=True); numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object'] numeric.remove('Id') numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars'] for feature in numreicMostCorr: featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1) pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')] for pair in pairs: featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1) categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object'] for category in categorical: data = pd.concat([train[category], train['SalePrice']], axis=1) data[category] = data[category].astype('category') if data[category].isnull().any(): data[category] = data[category].cat.add_categories(['MISSING']) data[category] = data[category].fillna('MISSING') cat_data = pd.concat([data['SalePrice'], data[category]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=category, y="SalePrice", data=cat_data) fig.axis(ymin=0, ymax=800000) plt.show() numeric_missing = train[numreicMostCorr].isnull().sum().sort_values(ascending=False) categorical_missing = train[categorical].isnull().sum().sort_values(ascending=False) numeric_to_delete = numeric_missing[numeric_missing > 438].index categorical_to_delete = categorical_missing[categorical_missing > 438].index def removeFromList(sourceList, filterList): filteredList = list(filter(lambda x: x not in filterList, sourceList)) return filteredList numreicMostCorr = removeFromList(numreicMostCorr, numeric_to_delete) categorical = removeFromList(categorical, categorical_to_delete) numreicMostCorr = removeFromList(numreicMostCorr, ['GarageCars', '1stFlrSF', '2ndFlrSF', 'YearRemodAdd', 'FullBath']) categorical = removeFromList(categorical, ['Alley', 'LotShape', 'LandSlope', 'BldgType', 'Exterior1st', 'Exterior2nd', 'ExterCond', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'GarageFinish', 'GarageType', 'GarageCond', 'Fence']) all_columns = numreicMostCorr + categorical + ['SalePrice'] train = train[all_columns] train['LotFrontage'] = train['LotFrontage'].fillna(train['LotFrontage'].mean()) train[numreicMostCorr].isnull().sum().sort_values(ascending=False) train['BsmtQual'] = train['BsmtQual'].fillna('Missing') train['GarageQual'] = train['GarageQual'].fillna('Missing') train['MasVnrType'] = train['MasVnrType'].fillna('Missing') train = train.drop(train.loc[train['Electrical'].isnull()].index) train[categorical].isnull().sum().sort_values(ascending=False)
code
2029692/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from scipy import stats
import matplotlib.pyplot as plt
from scipy.stats import skew
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2029692/cell_7
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=.8, square=True);
numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object']
numeric.remove('Id')
numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars']
for feature in numreicMostCorr:
    featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1)
pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')]
for pair in pairs:
    featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1)
    featureDF.plot.scatter(x=pair[0], y=pair[1])
code
2029692/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=.8, square=True);
numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object']
numeric.remove('Id')
numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars']
for feature in numreicMostCorr:
    featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1)
pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')]
for pair in pairs:
    featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1)
categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object']
for category in categorical:
    data = pd.concat([train[category], train['SalePrice']], axis=1)
    data[category] = data[category].astype('category')
    if data[category].isnull().any():
        data[category] = data[category].cat.add_categories(['MISSING'])
        data[category] = data[category].fillna('MISSING')
    cat_data = pd.concat([data['SalePrice'], data[category]], axis=1)
    f, ax = plt.subplots(figsize=(8, 6))
    fig = sns.boxplot(x=category, y='SalePrice', data=cat_data)
    fig.axis(ymin=0, ymax=800000)
    plt.show()
code
2029692/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
correlation_matrix = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=.8, square=True);
numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object']
numeric.remove('Id')
numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars']
for feature in numreicMostCorr:
    featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1)
pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')]
for pair in pairs:
    featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1)
categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object']
for category in categorical:
    data = pd.concat([train[category], train['SalePrice']], axis=1)
    data[category] = data[category].astype('category')
    if data[category].isnull().any():
        data[category] = data[category].cat.add_categories(['MISSING'])
        data[category] = data[category].fillna('MISSING')
    cat_data = pd.concat([data['SalePrice'], data[category]], axis=1)
    f, ax = plt.subplots(figsize=(8, 6))
    fig = sns.boxplot(x=category, y="SalePrice", data=cat_data)
    fig.axis(ymin=0, ymax=800000)
    plt.show()
numeric_missing = train[numreicMostCorr].isnull().sum().sort_values(ascending=False)
categorical_missing = train[categorical].isnull().sum().sort_values(ascending=False)
numeric_to_delete = numeric_missing[numeric_missing > 438].index
categorical_to_delete = categorical_missing[categorical_missing > 438].index
def removeFromList(sourceList, filterList):
    filteredList = list(filter(lambda x: x not in filterList, sourceList))
    return filteredList
numreicMostCorr = removeFromList(numreicMostCorr, numeric_to_delete)
categorical = removeFromList(categorical, categorical_to_delete)
numreicMostCorr = removeFromList(numreicMostCorr, ['GarageCars', '1stFlrSF', '2ndFlrSF', 'YearRemodAdd', 'FullBath'])
categorical = removeFromList(categorical, ['Alley', 'LotShape', 'LandSlope', 'BldgType', 'Exterior1st', 'Exterior2nd', 'ExterCond', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'GarageFinish', 'GarageType', 'GarageCond', 'Fence'])
print(categorical)
code
2029692/cell_12
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from scipy.stats import skew from scipy.stats import skew import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vmax=.8, square=True); numeric = [feature for feature in train.columns if train.dtypes[feature] != 'object'] numeric.remove('Id') numreicMostCorr = ['LotFrontage', 'OverallQual', 'YearBuilt', 'YearRemodAdd', '1stFlrSF', '2ndFlrSF', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageArea', 'GarageCars'] for feature in numreicMostCorr: featureDF = pd.concat([train['SalePrice'], train[feature]], axis=1) pairs = [('GarageArea', 'GarageCars'), ('YearBuilt', 'YearRemodAdd'), ('TotalBsmtSF', 'TotRmsAbvGrd'), ('GrLivArea', 'FullBath'), ('TotalBsmtSF', '1stFlrSF'), ('GrLivArea', '2ndFlrSF')] for pair in pairs: featureDF = pd.concat([train[pair[0]], train[pair[1]]], axis=1) categorical = [feature for feature in train.columns if train.dtypes[feature] == 'object'] for category in categorical: data = pd.concat([train[category], train['SalePrice']], axis=1) data[category] = data[category].astype('category') if data[category].isnull().any(): data[category] = data[category].cat.add_categories(['MISSING']) data[category] = data[category].fillna('MISSING') cat_data = pd.concat([data['SalePrice'], data[category]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=category, y="SalePrice", data=cat_data) fig.axis(ymin=0, ymax=800000) plt.show() numeric_missing = train[numreicMostCorr].isnull().sum().sort_values(ascending=False) categorical_missing = train[categorical].isnull().sum().sort_values(ascending=False) numeric_to_delete = numeric_missing[numeric_missing > 438].index categorical_to_delete = categorical_missing[categorical_missing > 438].index def removeFromList(sourceList, filterList): filteredList = list(filter(lambda x: x not in filterList, sourceList)) return filteredList numreicMostCorr = removeFromList(numreicMostCorr, numeric_to_delete) categorical = removeFromList(categorical, categorical_to_delete) numreicMostCorr = removeFromList(numreicMostCorr, ['GarageCars', '1stFlrSF', '2ndFlrSF', 'YearRemodAdd', 'FullBath']) categorical = removeFromList(categorical, ['Alley', 'LotShape', 'LandSlope', 'BldgType', 'Exterior1st', 'Exterior2nd', 'ExterCond', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'GarageFinish', 'GarageType', 'GarageCond', 'Fence']) all_columns = numreicMostCorr + categorical + ['SalePrice'] train = train[all_columns] train['LotFrontage'] = train['LotFrontage'].fillna(train['LotFrontage'].mean()) train[numreicMostCorr].isnull().sum().sort_values(ascending=False) train['BsmtQual'] = train['BsmtQual'].fillna('Missing') train['GarageQual'] = train['GarageQual'].fillna('Missing') train['MasVnrType'] = train['MasVnrType'].fillna('Missing') train = train.drop(train.loc[train['Electrical'].isnull()].index) train[categorical].isnull().sum().sort_values(ascending=False) from scipy.stats import skew skewed_cols = numreicMostCorr + ['SalePrice'] skewed = train[skewed_cols].apply(lambda x: skew(x.dropna())) skewed = skewed[skewed > 0.75] skewed = skewed.index train[skewed] = np.log1p(train[skewed]) for numer in skewed_cols: numerFeature = pd.DataFrame({'unskewed_' + numer: train[numer]}) numerFeature.hist()
code
2036121/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
train[train.comment_text.isnull()]
train[train.comment_text == '']
code
2036121/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
code
2036121/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
code
2036121/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
train[train.comment_text.isnull()]
train[train.comment_text == '']
train['comment_length'] = train.comment_text.str.len()
train = train.sort_values(by='comment_length', ascending=False)
pd.set_option('display.max_colwidth', -1)
train.comment_text.head(1)
code
2036121/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
train[train.comment_text.isnull()]
train[train.comment_text == '']
train['comment_length'] = train.comment_text.str.len()
train.comment_length.describe()
code
2036121/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
code
2036121/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
train[train.comment_text.isnull()]
code
2036121/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
code
2036121/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
code
2036121/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
train = pd.read_csv('../input/train.csv')
train.toxic.value_counts()
pd.crosstab(train.toxic, train.severe_toxic)
pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
pd.crosstab(train.severe_toxic, [train.obscene, train.threat, train.insult, train.identity_hate])
train.iloc[:, 2:8].corr()
train[train.comment_text.isnull()]
train[train.comment_text == '']
train['comment_length'] = train.comment_text.str.len()
train = train.sort_values(by='comment_length', ascending=False)
pd.set_option('display.max_colwidth', -1)
one_percent = int(np.ceil(train.shape[0] / 100))
train_sub = train.iloc[0:one_percent, :]
train_sub.toxic.value_counts()
code
50223032/cell_21
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
dftrain_processed.shape
code
50223032/cell_9
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    L.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
code
50223032/cell_25
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
y_train = dftrain.pop('target')
dftrain_processed.shape
y_train.shape
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
code
50223032/cell_34
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
y_train = dftrain.pop('target')
dftrain_processed.shape
y_train.shape
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
dftest_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftest[str(iteration)].values
    dftest_processed[str(iteration)] = feature
pre = list(model.predict(dftest_processed))
def getPredictions(prediction_list):
    prediction_list_processced = []
    for iteration in prediction_list:
        prediction_list_processced.append(round(iteration[1]))
    return prediction_list_processced
dfsubmission = pd.DataFrame()
dfsubmission['id'] = dftest['id']
dfsubmission['target'] = getPredictions(pre)
dfsubmission
code
50223032/cell_30
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
y_train = dftrain.pop('target')
dftrain_processed.shape
y_train.shape
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
dftest_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftest[str(iteration)].values
    dftest_processed[str(iteration)] = feature
pre = list(model.predict(dftest_processed))
len(pre)
code
50223032/cell_33
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
y_train = dftrain.pop('target')
dftrain_processed.shape
y_train.shape
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
dftest_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftest[str(iteration)].values
    dftest_processed[str(iteration)] = feature
pre = list(model.predict(dftest_processed))
def getPredictions(prediction_list):
    prediction_list_processced = []
    for iteration in prediction_list:
        prediction_list_processced.append(round(iteration[1]))
    return prediction_list_processced
dfsubmission = pd.DataFrame()
dfsubmission['id'] = dftest['id']
dfsubmission['target'] = getPredictions(pre)
code
50223032/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y_train = dftrain.pop('target')
y_train
code
50223032/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
plt.scatter(dftrain['299'], dftrain['1'])
plt.title('My PCA graph')
plt.xlabel('0 -{0}%'.format(dftrain['299']))
plt.ylabel('target -{0}%'.format(dftrain['1']))
code
50223032/cell_26
[ "text_html_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
dftest
code
50223032/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
code
50223032/cell_18
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
dftrain_processed
code
50223032/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
y.get_xlim()
code
50223032/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
dftrain['127'].values
code
50223032/cell_17
[ "image_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
dftest
code
50223032/cell_31
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf
def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr
def getSlope(df):
    return abs(df['slope'])
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y=sns.regplot(x='1',y='target',data=dftrain)
L=[]
for feature in range(300):
    y=sns.regplot(x=str(feature),y='target',data=dftrain)
    #L.append({'feature':feature,'slope':getslope(y.get_xlim(),y.get_ylim())})
    L.append({'feature':feature,'slope':getCorr(dftrain['target'],dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
dftrain_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftrain[str(iteration)].values
    dftrain_processed[str(iteration)] = feature
y_train = dftrain.pop('target')
dftrain_processed.shape
y_train.shape
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
dftest_processed = pd.DataFrame()
for iteration in features_to_save:
    feature = dftest[str(iteration)].values
    dftest_processed[str(iteration)] = feature
pre = list(model.predict(dftest_processed))
pre[0][1]
code
50223032/cell_14
[ "image_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

def getSlope(df):
    return abs(df['slope'])

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
L = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    # L.append({'feature': feature, 'slope': getslope(y.get_xlim(), y.get_ylim())})
    L.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(L[iteration]['feature'])
print(features_to_save)
code
50223032/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y_train = dftrain.pop('target')
y_train.shape
code
50223032/cell_10
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
L = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    # L.append({'feature': feature, 'slope': getslope(y.get_xlim(), y.get_ylim())})
    L.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
L[0]['slope']
code
50223032/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

def getSlope(df):
    return abs(df['slope'])

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
L = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    # L.append({'feature': feature, 'slope': getslope(y.get_xlim(), y.get_ylim())})
    L.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
L.sort(key=getSlope, reverse=True)
print(L)
code
50223032/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
dftrain
code
72120846/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import cv2 as cv
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
pd.options.display.float_format = '{:.2f}'.format
training_labels = pd.read_csv('../input/landmark-recognition-2021/train.csv')
training_labels['path1'] = training_labels['id'].str.slice(start=0, stop=1)
training_labels['path2'] = training_labels['id'].str.slice(start=1, stop=2)
training_labels['path3'] = training_labels['id'].str.slice(start=2, stop=3)
training_labels['path'] = '../input/landmark-recognition-2021/train/' + training_labels['path1'] + '/' + training_labels['path2'] + '/' + training_labels['path3'] + '/' + training_labels['id'] + '.jpg'
training_labels = training_labels.drop(['path1', 'path2', 'path3'], axis=1)
piv = training_labels.pivot_table(index='landmark_id', aggfunc=lambda x: len(x.unique()))['id']
piv.sort_values()
code
72120846/cell_5
[ "image_output_1.png" ]
import matplotlib.image as mpimg
import pandas as pd
import pandas as pd
import numpy as np
import cv2 as cv
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
pd.options.display.float_format = '{:.2f}'.format
training_labels = pd.read_csv('../input/landmark-recognition-2021/train.csv')
training_labels['path1'] = training_labels['id'].str.slice(start=0, stop=1)
training_labels['path2'] = training_labels['id'].str.slice(start=1, stop=2)
training_labels['path3'] = training_labels['id'].str.slice(start=2, stop=3)
training_labels['path'] = '../input/landmark-recognition-2021/train/' + training_labels['path1'] + '/' + training_labels['path2'] + '/' + training_labels['path3'] + '/' + training_labels['id'] + '.jpg'
training_labels = training_labels.drop(['path1', 'path2', 'path3'], axis=1)
piv = training_labels.pivot_table(index='landmark_id', aggfunc=lambda x: len(x.unique()))['id']
piv.sort_values()
top_landmark = 138982
top_landmark_df = training_labels.loc[training_labels['landmark_id'] == top_landmark]
top_landmark_sample = top_landmark_df.sample(n=16, random_state=2021)
top_landmark_path = top_landmark_sample['path']
fig = plt.figure(figsize=(100, 100))
for i in range(0, 16):
    fig.add_subplot(4, 4, i + 1)
    plt.imshow(mpimg.imread(top_landmark_path.iloc[i]))
    plt.axis('off')
code
50210665/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv')
walmart_data.head()
code
50210665/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv')
walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum()
walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'std'})
print('Store Number {} has maximum Standard Deviation. STD {}'.format(walmart_data_std['Weekly_Sales'].idxmax(), walmart_data_std['Weekly_Sales'].max()))
code
50210665/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50210665/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv')
walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum()
walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'std'})
walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': ['mean', 'std']})
walmart_data_std.head()
code
50210665/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv')
walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum()
walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'std'})
walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': ['mean', 'std']})
walmart_data_Q32012 = walmart_data[(pd.to_datetime(walmart_data['Date']) >= pd.to_datetime('07-01-2012')) & (pd.to_datetime(walmart_data['Date']) <= pd.to_datetime('09-30-2012'))]
walmart_data_growth = walmart_data_Q32012.groupby(['Store'])['Weekly_Sales'].sum()
print("Store Number {} has Good Quarterly Growth in Q3'2012 {}".format(walmart_data_growth.idxmax(), walmart_data_growth.max()))
code
50210665/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv')
walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum()
print('Store Number {} has maximum Sales. Sum of Total Sales {}'.format(walmart_data_groupby.idxmax(), walmart_data_groupby.max()))
code
73072460/cell_42
[ "text_html_output_1.png" ]
from IPython.display import Image
Image(url='https://res.cloudinary.com/practicaldev/image/fetch/s--nUoflRuG--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://i.ibb.co/kG5vPdn/final-cnn.png', width=750, height=500)
code
73072460/cell_21
[ "image_output_1.png" ]
import tensorflow as tf

# Augmented generator for training; validation and test only get rescaling.
# The train / val / test dataframes come from earlier cells of the notebook.
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True)
training_generator = training_data_gen.flow_from_dataframe(dataframe=train, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_dataframe(dataframe=val, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_dataframe(dataframe=test, x_col='images', y_col='labels', target_size=(224, 224), color_mode='rgb', class_mode='categorical', batch_size=64)

# Simple MLP baseline on flattened 224x224 RGB images with 9 output classes
mlp_model = tf.keras.models.Sequential()
mlp_model.add(tf.keras.layers.Flatten(input_shape=(224, 224, 3)))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dropout(0.4))
mlp_model.add(tf.keras.layers.Dense(256, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(128, activation='relu'))
mlp_model.add(tf.keras.layers.Dense(9, activation='softmax'))
mlp_model.summary()
mlp_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
mlp_model.fit(training_generator, steps_per_epoch=24, validation_data=validation_generator, validation_steps=20, epochs=5)
code