Dataset schema:
  path: string (length 13 to 17)
  screenshot_names: sequence of strings (length 1 to 873)
  code: string (length 0 to 40.4k)
  cell_type: string (1 distinct value)
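The header above reads like a Hugging Face dataset-viewer schema. A minimal, hedged sketch of reading a dump with these columns via the `datasets` library; the dataset id below is hypothetical and stands in for wherever this dump is actually hosted:

# Hedged sketch: load a dump with the schema above. The dataset id is
# hypothetical; substitute the real repository name.
from datasets import load_dataset

ds = load_dataset("user/kaggle-notebook-cells", split="train")  # hypothetical id
row = ds[0]
print(row["path"])              # e.g. "18137853/cell_5"
print(row["screenshot_names"])  # list of output image names
print(row["code"][:200])        # the cell's source code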
18137853/cell_5
[ "text_plain_output_1.png" ]
from PIL import Image
from tqdm import tqdm, tqdm_notebook
import matplotlib.image as mpimg
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import imgaug.augmenters as iaa
import os, os.path
from xml.etree import ElementTree as ET

def parse_annotation(fname):
    objects = []
    for child in ET.parse(fname).findall('object'):
        dog = {}
        dog['name'] = child.find('name').text
        dog['pose'] = child.find('pose').text
        dog['difficult'] = int(child.find('difficult').text)
        dog['truncated'] = int(child.find('truncated').text)
        bbox = child.find('bndbox')
        dog['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text),
                       int(bbox.find('xmax').text), int(bbox.find('ymax').text)]
        objects.append(dog)
    return objects

IMAGE_DIR = '../input/all-dogs/all-dogs'
dog_imgs = pd.DataFrame(os.listdir(IMAGE_DIR), columns=['filename'])
dog_imgs['basename'] = dog_imgs['filename'].str.split('.').apply(lambda x: x[0])
dog_imgs[['class', 'id']] = dog_imgs['basename'].str.split('_', expand=True)
dog_imgs = dog_imgs.set_index('basename').sort_index()

ANNOTATION_DIR = '../input/annotation/Annotation'
dog_breeds = pd.DataFrame(os.listdir(ANNOTATION_DIR), columns=['dirname'])
dog_breeds[['class', 'breedname']] = dog_breeds['dirname'].str.split('-', n=1, expand=True)
dog_breeds = dog_breeds.set_index('class').sort_index()

dog_imgs['annotation_filename'] = dog_imgs.apply(
    lambda x: os.path.join(ANNOTATION_DIR, dog_breeds.loc[x['class']]['dirname'], x.name), axis=1)
dog_imgs['objects'] = dog_imgs['annotation_filename'].apply(parse_annotation)

doggo = dog_imgs.sample(1).iloc[0]

pil_im = Image.open(os.path.join(IMAGE_DIR, doggo['filename']))
im = np.asarray(pil_im)
fig, ax = plt.subplots(1)
ax.imshow(im)
h, w, c = im.shape
for dog in doggo['objects']:
    xmin, ymin, xmax, ymax = dog['bbox']
    print(h, w, ':', xmin, ymin, xmax, ymax)
    bbox = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                             linewidth=1, edgecolor='r', facecolor='none')
    ax.add_patch(bbox)
plt.show()

fig, ax = plt.subplots(1)
dog = doggo.objects[0]
h, w, c = im.shape
xmin, ymin, xmax, ymax = dog['bbox']
# im = im[ymin:ymax, xmin:xmax]
pil_crop = pil_im.crop((xmin, ymin, xmax, ymax)).resize((64, 64))
im2 = np.asarray(pil_crop)
ax.imshow(im2)
plt.show()

def get_truth_images():
    all_imgs = []
    for _, doggo in tqdm_notebook(dog_imgs.iterrows(), total=len(dog_imgs)):
        pil_im = Image.open(os.path.join(IMAGE_DIR, doggo['filename']))
        w, h = pil_im.size  # fixed: clamp against the current image's size, not the stale global `im`
        for dog in doggo['objects']:
            border = 10
            xmin, ymin, xmax, ymax = dog['bbox']
            xmin = max(0, xmin - border)
            ymin = max(0, ymin - border)
            xmax = min(w, xmax + border)
            ymax = min(h, ymax + border)
            pil_crop = pil_im.crop((xmin, ymin, xmax, ymax)).resize((64, 64))
            all_imgs.append(np.asarray(pil_crop))
    return np.stack(all_imgs)

truth_imgs = get_truth_images()
code
106213588/cell_5
[ "text_plain_output_1.png" ]
import torch

class Var:
    """simple variable container"""

    def __init__(self, value):
        self.v = value
        self.grad = 0
        self.dag = None

    def __add__(self, other):
        """Overloads the + operator"""
        if not isinstance(other, Var):
            other = Var(other)
        result = Var(self.v + other.v)
        result.dag = ('+', (self, other))
        return result

    def __radd__(self, other):
        """Reverse addition. Needed when the user calculates 2 + a"""
        return self + other

    def __pow__(self, other):
        """Overloads the power operator"""
        assert not isinstance(other, Var), 'only primitive exponents are supported yet'
        result = Var(self.v ** other)
        result.dag = ('^', (self, other))
        return result

    def __mul__(self, other):
        """Overloads the multiplication operator"""
        if not isinstance(other, Var):
            other = Var(other)
        result = Var(self.v * other.v)
        result.dag = ('*', (self, other))
        return result

    def __rmul__(self, other):
        return self * other

    def __neg__(self):
        return self * -1

    def __sub__(self, other):
        return self + -other

    def __truediv__(self, other):
        return self * other ** (-1)

    def __rtruediv__(self, other):
        return other * self.v ** (-1)

    def __backward(self, seed):
        """this gets called recursively"""
        if self.dag is None:
            return
        a, b = (None, None)
        op, prevs = self.dag
        if len(prevs) == 2:
            a, b = prevs
        else:
            a = list(prevs)[0]
        if op == '+':
            a.grad += seed
            b.grad += seed
        elif op == '*':
            a.grad += b.v * seed
            b.grad += a.v * seed
        elif op == '^':
            a.grad += b * a.v ** (b - 1) * seed
        a.__backward(a.grad)
        if isinstance(b, Var):
            b.__backward(b.grad)

    def backward(self, seed=1.0):
        """
        do the backward pass and calculate the partial derivative of each parent `Var`

        Parameters
        ----------
        seed (float): normally it should be set to 1 because dy/dy = 1.
            the derivatives of the children get passed to the parents through this variable.
        """
        self.grad = 0
        self.__backward(seed)
        self.grad = seed

    def reset(self):
        """resets the gradients"""
        if self.dag is None:
            return
        a, b = (None, None)
        if len(self.dag[1]) == 2:
            a, b = self.dag[1]
        else:
            a = list(self.dag[1])[0]
        a.grad = 0
        a.reset()
        if b is not None:
            b.grad = 0
            b.reset()

    def __repr__(self):
        return f'[{self.v} | {self.grad}]'

def test_sanity_check(it=None):
    x = Var(-4.0)
    z = 2 * x + 2 + x
    y = z * x
    xmg, ymg = (x, y)
    x = torch.Tensor([-4.0]).double()
    x.requires_grad = True
    z = 2 * x + 2 + x
    y = z * x
    xpt, ypt = (x, y)
    assert ymg.v == ypt.data.item()
    ymg.backward()
    ypt.backward()
    assert xmg.grad == xpt.grad.item()

    a = Var(-3.0)
    a = -a + 20 - 2
    a.backward()
    b = torch.Tensor([-3.0]).double()
    b.requires_grad = True
    b = -b + 20 - 2
    b.backward()
    assert a.v == b.data.item()

    x = Var(2.0)
    y = x ** 3
    z = torch.tensor([2.0], requires_grad=True)
    f = z ** 3
    assert y.v == f.data.item()
    y.backward()
    f.backward()
    assert x.grad == z.grad.item()

test_sanity_check()

def gradTest():
    """test gradient calculations"""
    a = 6.0
    b = 2.2
    c = 3.0
    a1 = Var(a)
    b1 = Var(b)
    z1 = a1 * b1
    z1.backward()
    print(f'SG Gradients of z = ab is, a={a1.grad}, b={b1.grad}')
    a2 = torch.tensor(a, requires_grad=True)
    b2 = torch.tensor(b, requires_grad=True)
    z2 = a2 * b2
    z2.backward()
    print(f'Torch Gradients of z = ab is, a={a2.grad}, b={b2.grad}')
    z1.reset()
    print(a1.grad, b1.grad, len(z1.dag))
    c1 = Var(c)
    d1 = a1 + b1
    z1 = d1 * c1
    z1.backward()
    print(f'SG Gradients of z = (a+b)c = {z1.v} is, a={a1.grad}, b={b1.grad}, c={c1.grad}')
    a2 = torch.tensor(a, requires_grad=True)
    b2 = torch.tensor(b, requires_grad=True)
    c2 = torch.tensor(c, requires_grad=True)
    d2 = a2 + b2
    z2 = d2 * c2
    z2.backward()
    print(f'Torch Gradients of z = (a+b)c = {z2} is, a={a2.grad}, b={b2.grad}, c={c2.grad}')
    a1 = Var(a)
    z1 = a1 + a1
    z1.backward()
    tol = 1e-06
    a2 = torch.tensor([a], requires_grad=True)
    a3 = a2 + a2
    a3.retain_grad()
    a3.backward(gradient=torch.tensor([1.0]))
    assert a1.grad == a2.grad.item()  # fixed: compare against the tensor's scalar value

gradTest()
code
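A short usage sketch of the `Var` autodiff container defined in the cell above (it assumes that `Var` class is in scope); it checks the derivative of 3x² + 2x at x = 5, which should be 6x + 2 = 32:

# Hedged usage sketch of the Var class above, not part of the original notebook.
x = Var(5.0)
y = 3 * x ** 2 + 2 * x
y.backward()
print(y.v, x.grad)  # expected: 85.0 32.0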
32068950/cell_13
[ "text_plain_output_1.png" ]
from collections import OrderedDict
from sklearn.linear_model import RidgeCV
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

X_train = np.vstack(list(X_train.values()))
X_test = np.vstack(list(X_test.values()))
Y_train = np.vstack(list(Y_train.values()))
Y_test = np.vstack(list(Y_test.values()))
Y_train.shape

y_train = Y_train[:, 0]
y_test = Y_test[:, 0]
ridge_cc = RidgeCV()
ridge_cc.fit(X_train, y_train)
y_train = Y_train[:, 1]
y_test = Y_test[:, 1]
ridge_f = RidgeCV()
ridge_f.fit(X_train, y_train)
print(ridge_f.score(X_train, y_train))
print(ridge_f.score(X_test, y_test))
code
32068950/cell_9
[ "text_plain_output_1.png" ]
from collections import OrderedDict
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    print(area)
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]
code
32068950/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
countries.head()
code
32068950/cell_20
[ "text_plain_output_1.png" ]
from collections import OrderedDict
from sklearn.linear_model import RidgeCV
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

X_train = np.vstack(list(X_train.values()))
X_test = np.vstack(list(X_test.values()))
Y_train = np.vstack(list(Y_train.values()))
Y_test = np.vstack(list(Y_test.values()))
Y_train.shape

y_train = Y_train[:, 0]
y_test = Y_test[:, 0]
ridge_cc = RidgeCV()
ridge_cc.fit(X_train, y_train)
y_train = Y_train[:, 1]
y_test = Y_test[:, 1]
ridge_f = RidgeCV()
ridge_f.fit(X_train, y_train)

test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv').set_index('ForecastId')
test_data = transform_area(test_data)
test_data = transform_countries(test_data)
test_data['ConfirmedCases'] = np.NaN
test_data['Fatalities'] = np.NaN
last_date = train_data.Date.max()
last_date
train_dates = set(train_data.Date)
test_dates = set(test_data.Date)
for i in test_data.loc[test_data.Date <= last_date].index:
    date = test_data.loc[i].Date
    if date in train_dates:
        slc = train_data.loc[(train_data.Date == date) & (train_data.Area == test_data.loc[i, 'Area'])]
        test_data.loc[i, 'ConfirmedCases'] = slc['ConfirmedCases'].iloc[0]
        test_data.loc[i, 'Fatalities'] = slc['Fatalities'].iloc[0]

for area in sorted(list(set(test_data.Area))):
    print(area)
    area_data = test_data.loc[test_data.Area == area].set_index('Date').sort_index()
    for i in area_data.index:
        if not np.isnan(area_data.loc[i, 'ConfirmedCases']) and (not np.isnan(area_data.loc[i, 'Fatalities'])):
            continue
        x = np.zeros(2 * N_FEATURES + 2)
        for j in range(1, 1 + N_FEATURES):
            x[j * 2 - 2] = area_data.shift(j).loc[i].ConfirmedCases
            x[j * 2 - 1] = area_data.shift(j).loc[i].Fatalities
        x[2 * N_FEATURES] = area_data.loc[i].Density
        x[2 * N_FEATURES + 1] = area_data.loc[i].InfantMortality
        x = x.reshape(1, -1)
        test_data.loc[(test_data.Area == area) & (test_data.Date == i), 'ConfirmedCases'] = ridge_cc.predict(x)[0]
        test_data.loc[(test_data.Area == area) & (test_data.Date == i), 'Fatalities'] = ridge_f.predict(x)[0]
        area_data.loc[i, 'ConfirmedCases'] = ridge_cc.predict(x)[0] + area_data.shift(1).loc[i, 'ConfirmedCases']
        area_data.loc[i, 'Fatalities'] = ridge_f.predict(x)[0] + area_data.shift(1).loc[i, 'Fatalities']
code
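The cell above fills the test period day by day, feeding each prediction back in as a lag feature. A self-contained toy sketch of that recursive one-step-ahead pattern (synthetic series; all names here are illustrative, not from the notebook):

# Hedged sketch: train a ridge model on N lagged levels to predict the next
# daily increment, then roll forward by appending each prediction as history.
import numpy as np
from sklearn.linear_model import RidgeCV

N = 5
series = np.cumsum(np.random.rand(60))   # toy cumulative series
X = np.array([series[k - N + 1:k + 1] for k in range(N - 1, len(series) - 1)])  # N lags ending at day k
y = np.diff(series)[N - 1:]              # increment from day k to day k+1
model = RidgeCV().fit(X, y)

history = list(series)
for _ in range(7):                       # forecast 7 days ahead
    x = np.array(history[-N:]).reshape(1, -1)
    history.append(history[-1] + model.predict(x)[0])
print(history[-7:])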
32068950/cell_11
[ "text_html_output_1.png" ]
from collections import OrderedDict
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

X_train = np.vstack(list(X_train.values()))
X_test = np.vstack(list(X_test.values()))
Y_train = np.vstack(list(Y_train.values()))
Y_test = np.vstack(list(Y_test.values()))
Y_train.shape
code
32068950/cell_19
[ "text_plain_output_1.png" ]
from collections import OrderedDict
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv').set_index('ForecastId')
test_data = transform_area(test_data)
test_data = transform_countries(test_data)
last_date = train_data.Date.max()
last_date
train_dates = set(train_data.Date)
test_dates = set(test_data.Date)
for i in test_data.loc[test_data.Date <= last_date].index:
    date = test_data.loc[i].Date
    if date in train_dates:
        slc = train_data.loc[(train_data.Date == date) & (train_data.Area == test_data.loc[i, 'Area'])]
        test_data.loc[i, 'ConfirmedCases'] = slc['ConfirmedCases'].iloc[0]
        test_data.loc[i, 'Fatalities'] = slc['Fatalities'].iloc[0]
test_data.info()
code
32068950/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

from collections import OrderedDict
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
code
32068950/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)
train_data.head()
code
32068950/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)
train_data.info()
code
32068950/cell_15
[ "text_plain_output_1.png" ]
from collections import OrderedDict
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv').set_index('ForecastId')
test_data = transform_area(test_data)
test_data = transform_countries(test_data)
test_data.head()
code
32068950/cell_17
[ "text_plain_output_1.png" ]
from collections import OrderedDict
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

last_date = train_data.Date.max()
last_date
code
32068950/cell_12
[ "text_html_output_1.png" ]
from collections import OrderedDict
from sklearn.linear_model import RidgeCV
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

N_FEATURES = 5
countries = pd.read_csv('/kaggle/input/countries-of-the-world/countries of the world.csv', decimal=',')
countries['Country'] = countries.Country.str.lower()
countries = countries.set_index('Country')
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv').set_index('Id')

def transform_area(data):
    data['Area'] = ''
    data.loc[~data.Province_State.isna(), 'Area'] = data.loc[~data.Province_State.isna()].Country_Region.str.lower() + '/' + data.Province_State.str.lower()
    data.loc[data.Province_State.isna(), 'Area'] = data.loc[data.Province_State.isna()].Country_Region.str.lower()
    data = data.drop('Province_State', axis='columns')
    return data

def transform_countries(data):
    data['Density'] = countries['Pop. Density (per sq. mi.)'].mean()
    data['InfantMortality'] = countries['Infant mortality (per 1000 births)'].mean()
    for i in data.index:
        country = data.loc[i, 'Country_Region'].lower()
        if country in countries.index:
            data.loc[i, 'Density'] = countries.loc[country, 'Pop. Density (per sq. mi.)']
    return data

train_data = transform_area(train_data)
train_data = transform_countries(train_data)

X_train = {}
X_test = {}
Y_train = {}
Y_test = {}
for area in list(set(train_data.Area)):
    area_data = train_data.loc[train_data.Area == area].set_index('Date').sort_index()
    Y = area_data[['ConfirmedCases', 'Fatalities']] - area_data.shift(1)[['ConfirmedCases', 'Fatalities']]
    dic = OrderedDict()
    for i in range(1, 1 + N_FEATURES):
        dic['CC_{}'.format(i)] = area_data.shift(i)['ConfirmedCases']
        dic['F_{}'.format(i)] = area_data.shift(i)['Fatalities']
    dic['Density'] = area_data['Density']
    dic['InfantMortality'] = area_data['InfantMortality']
    X = pd.DataFrame(dic, index=area_data.index)
    X = X.dropna(axis='index')
    Y = Y.loc[X.index]
    X_train[area] = X.iloc[:int(len(X) * 0.8)]
    X_test[area] = X.iloc[int(len(X) * 0.8):]
    Y_train[area] = Y.iloc[:int(len(X) * 0.8)]
    Y_test[area] = Y.iloc[int(len(X) * 0.8):]

X_train = np.vstack(list(X_train.values()))
X_test = np.vstack(list(X_test.values()))
Y_train = np.vstack(list(Y_train.values()))
Y_test = np.vstack(list(Y_test.values()))
Y_train.shape

y_train = Y_train[:, 0]
y_test = Y_test[:, 0]
ridge_cc = RidgeCV()
ridge_cc.fit(X_train, y_train)
print(ridge_cc.score(X_train, y_train))
print(ridge_cc.score(X_test, y_test))
code
105180497/cell_25
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape
i = 0  # `i` was undefined in this cell; assume the intent was to inspect one column
train_data[non_numerical_label[i]].values
code
105180497/cell_29
[ "text_plain_output_1.png" ]
from statsmodels.formula.api import ols
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape

plt.rcParams['figure.figsize'] = [25, 25]
panels = int(np.ceil(np.sqrt(len(numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_numerical_features = []
for i in range(0, len(numerical_label)):
    x = list(train_data[numerical_label[i]])
    y = list(train_data['SalePrice'])
    r = np.corrcoef(x, y)
    r = r[0, 1]
    axs[i // panels, i % panels].scatter(x, y)
    axs[i // panels, i % panels].title.set_text(numerical_label[i] + ': ' + str(r))
    if r > 0.5 or r < -0.5:
        selected_numerical_features.append(numerical_label[i])

plt.rcParams['figure.figsize'] = [30, 40]
panels = int(np.ceil(np.sqrt(len(non_numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_non_numerical_features = []
for i in range(0, len(non_numerical_label)):
    dataset = []
    unique_cats = np.unique(train_data[non_numerical_label[i]].values)
    tbl_temp = train_data[[non_numerical_label[i], 'SalePrice']]
    for catg in unique_cats:
        dataset.append(tbl_temp.loc[tbl_temp[non_numerical_label[i]] == catg, 'SalePrice'].values)
    p = -1
    if len(unique_cats) >= 2:
        model = ols('SalePrice ~ C(' + non_numerical_label[i] + ')', data=tbl_temp).fit()
        aov_table = sm.stats.anova_lm(model, typ=2)
        p = aov_table['PR(>F)'][0]
    axs[i // panels, i % panels].violinplot(dataset)
    axs[i // panels, i % panels].title.set_text(non_numerical_label[i] + '\n' + ' Anova p: ' + str(float('{:.5f}'.format(p))))
    axs[i // panels, i % panels].set_xticks(np.arange(1, len(unique_cats) + 1))
    axs[i // panels, i % panels].set_xticklabels(unique_cats)
    if p != -1 and p < 0.05:  # at least one category is significantly different from the others
        selected_non_numerical_features.append(non_numerical_label[i])

selected_non_numerical_features
code
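The cell above keeps a categorical feature when a one-way ANOVA on SalePrice is significant. A self-contained toy of that statsmodels test (synthetic data; column names here are illustrative, not from the notebook):

# Hedged sketch: does a categorical column explain variance in a numeric target?
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

df = pd.DataFrame({'grp': ['a'] * 10 + ['b'] * 10,
                   'target': list(range(10)) + list(range(5, 15))})
model = ols('target ~ C(grp)', data=df).fit()
p = sm.stats.anova_lm(model, typ=2)['PR(>F)'][0]
print(p, p < 0.05)  # a small p-value means the group means differ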
105180497/cell_26
[ "text_plain_output_1.png" ]
from statsmodels.formula.api import ols
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape

plt.rcParams['figure.figsize'] = [25, 25]
panels = int(np.ceil(np.sqrt(len(numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_numerical_features = []
for i in range(0, len(numerical_label)):
    x = list(train_data[numerical_label[i]])
    y = list(train_data['SalePrice'])
    r = np.corrcoef(x, y)
    r = r[0, 1]
    axs[i // panels, i % panels].scatter(x, y)
    axs[i // panels, i % panels].title.set_text(numerical_label[i] + ': ' + str(r))
    if r > 0.5 or r < -0.5:
        selected_numerical_features.append(numerical_label[i])

plt.rcParams['figure.figsize'] = [30, 40]
panels = int(np.ceil(np.sqrt(len(non_numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_non_numerical_features = []
for i in range(0, len(non_numerical_label)):
    dataset = []
    unique_cats = np.unique(train_data[non_numerical_label[i]].values)
    tbl_temp = train_data[[non_numerical_label[i], 'SalePrice']]
    for catg in unique_cats:
        dataset.append(tbl_temp.loc[tbl_temp[non_numerical_label[i]] == catg, 'SalePrice'].values)
    p = -1
    if len(unique_cats) >= 2:
        model = ols('SalePrice ~ C(' + non_numerical_label[i] + ')', data=tbl_temp).fit()
        aov_table = sm.stats.anova_lm(model, typ=2)
        p = aov_table['PR(>F)'][0]
    axs[i // panels, i % panels].violinplot(dataset)
    axs[i // panels, i % panels].title.set_text(non_numerical_label[i] + '\n' + ' Anova p: ' + str(float('{:.5f}'.format(p))))
    axs[i // panels, i % panels].set_xticks(np.arange(1, len(unique_cats) + 1))
    axs[i // panels, i % panels].set_xticklabels(unique_cats)
    if p != -1 and p < 0.05:  # at least one category is significantly different from the others
        selected_non_numerical_features.append(non_numerical_label[i])
code
105180497/cell_19
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
test_data.shape
code
105180497/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105180497/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_data.head()
code
105180497/cell_18
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape
code
105180497/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape

plt.rcParams['figure.figsize'] = [25, 25]
panels = int(np.ceil(np.sqrt(len(numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_numerical_features = []
for i in range(0, len(numerical_label)):
    x = list(train_data[numerical_label[i]])
    y = list(train_data['SalePrice'])
    r = np.corrcoef(x, y)
    r = r[0, 1]
    axs[i // panels, i % panels].scatter(x, y)
    axs[i // panels, i % panels].title.set_text(numerical_label[i] + ': ' + str(r))
    if r > 0.5 or r < -0.5:
        selected_numerical_features.append(numerical_label[i])

selected_numerical_features
code
105180497/cell_15
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
print('dropped rows by SalePrice: ' + ' '.join(list([str(row2drop[i]) for i in range(0, len(row2drop))])))
code
105180497/cell_17
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
print('# of duplicated rows in training data: ' + str(sum(dup_rows_train == True)))
dup_rows_test = test_data.duplicated()  # renamed: this is the test set, not training
print('# of duplicated rows in test data: ' + str(sum(dup_rows_test == True)))
code
105180497/cell_14
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data
code
105180497/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
train_data[numerical_label] = train_data[numerical_label].fillna(0)
train_data[non_numerical_label] = train_data[non_numerical_label].fillna('n')
test_data[numerical_label] = test_data[numerical_label].fillna(0)
test_data[non_numerical_label] = test_data[non_numerical_label].fillna('n')
na_per_row = train_data.isna().sum(axis=1)
row2drop = list(train_data.index[na_per_row > 0])
train_data = train_data.drop(row2drop, axis=0)
dup_rows_train = train_data.duplicated()
dup_rows_train = test_data.duplicated()
train_data.shape

plt.rcParams['figure.figsize'] = [25, 25]
panels = int(np.ceil(np.sqrt(len(numerical_label))))
fig, axs = plt.subplots(panels, panels)
selected_numerical_features = []
for i in range(0, len(numerical_label)):
    x = list(train_data[numerical_label[i]])
    y = list(train_data['SalePrice'])
    r = np.corrcoef(x, y)
    r = r[0, 1]
    axs[i // panels, i % panels].scatter(x, y)
    axs[i // panels, i % panels].title.set_text(numerical_label[i] + ': ' + str(r))
    if r > 0.5 or r < -0.5:
        selected_numerical_features.append(numerical_label[i])
code
105180497/cell_12
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
numerical_label = list(train_data.dtypes[train_data.dtypes != 'object'].index)
non_numerical_label = list(train_data.dtypes[train_data.dtypes == 'object'].index)
numerical_label.remove('Id')
numerical_label.remove('SalePrice')
na_per_col_train = train_data.isna().sum(axis=0)
na_per_col_test = test_data.isna().sum(axis=0)
na_per_col = np.array(na_per_col_train[0:80]) + np.array(na_per_col_test)
col2drop_train = list(train_data.columns[na_per_col_train >= train_data.shape[0] * 0.2])
col2drop_test = list(test_data.columns[na_per_col_test >= test_data.shape[0] * 0.2])
col2drop_train.extend(col2drop_test)
col2drop = np.unique(col2drop_train)
train_data = train_data.drop(col2drop, axis=1)
test_data = test_data.drop(col2drop, axis=1)
numerical_label = list(set(numerical_label).difference(set(col2drop)))
non_numerical_label = list(set(non_numerical_label).difference(set(col2drop)))
print('dropped columns: ' + ' '.join(col2drop))
code
90137984/cell_9
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from numpy import linspace
from pandas import read_csv
from sklearn.compose import make_column_transformer
from sklearn.ensemble import BaggingClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, StandardScaler

train = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/train.csv')
train = train.drop(['id', 'circle_id'], axis=1)
test = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/test.csv')
sample = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/sample.csv')
metadata = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/data_dictionary.csv')
mask = train.isna().sum() / len(train) > 0.5  # drop columns that are more than half missing
train = train.loc[:, ~mask]

X_train, y_train = (train.drop('churn_probability', axis=1), train['churn_probability'])
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1, stratify=y_train)
impute_onehot = make_column_transformer(
    (SimpleImputer(), X_train.select_dtypes(include='number').columns),
    (OneHotEncoder(handle_unknown='ignore'), X_train.select_dtypes(include='object').columns))
base = make_pipeline(impute_onehot, StandardScaler(), SGDClassifier(n_jobs=-1))
base.fit(X_train, y_train)
accuracy_score(y_test, base.predict(X_test))

model = make_pipeline(impute_onehot, StandardScaler(), SGDClassifier(class_weight='balanced'))
model = RandomizedSearchCV(
    model,
    {'sgdclassifier__alpha': linspace(0.0001, 2),
     'sgdclassifier__loss': ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron']},
    cv=10, n_iter=30, verbose=True,
    scoring='accuracy',  # fixed typo: was `scoting`, which raises a TypeError
    n_jobs=-1)
model.fit(X_train, y_train)
accuracy_score(y_test, model.predict(X_test))
code
90137984/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import read_csv

train = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/train.csv')
train = train.drop(['id', 'circle_id'], axis=1)
test = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/test.csv')
sample = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/sample.csv')
metadata = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/data_dictionary.csv')
(train.isna().sum() / len(train) * 100).plot(ylim=(0, 100), figsize=(15, 5))
code
90137984/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import read_csv

train = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/train.csv')
train = train.drop(['id', 'circle_id'], axis=1)
test = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/test.csv')
sample = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/sample.csv')
metadata = read_csv('/kaggle/input/telecom-churn-case-study-hackathon-gc1/data_dictionary.csv')
mask = train.isna().sum() / len(train) > 0.5
train = train.loc[:, ~mask]
(train.isna().sum() / len(train) * 100).plot(ylim=(0, 100), figsize=(15, 5))
code
105175580/cell_13
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import graphviz

train.dtypes
pred = clf.predict(test)
pred
predict = clf.predict_proba(test)
predict
x = train.drop('TARGET', axis=1)
y = train.TARGET
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
clf = tree.DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf = clf.fit(x, y)
clf = DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf.fit(x, y)
clf_feature_names = list(x.columns)
dot_data = tree.export_graphviz(clf, feature_names=clf_feature_names, class_names=['0', '1'],
                                filled=True, rounded=True, special_characters=True, out_file=None)
graph = graphviz.Source(dot_data)
graph
clf = tree.DecisionTreeClassifier(max_depth=3, random_state=1, splitter='best', min_samples_leaf=2)
clf = clf.fit(x, y)
clf = tree.DecisionTreeClassifier(max_depth=3, random_state=1, splitter='best', min_samples_leaf=2)
clf.fit(x, y)
clf_feature_names = list(x.columns)
dot_data = tree.export_graphviz(clf, feature_names=clf_feature_names, class_names=['0', '1'],
                                filled=True, rounded=True, special_characters=True, out_file=None)
graph = graphviz.Source(dot_data)
graph
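# --- Added note (not in the original cell): `graph` only displays inline in a
# notebook; graphviz.Source can also write the drawing to disk. A small sketch,
# assuming the `graph` object built above:
graph.render('decision_tree', format='png', cleanup=True)  # writes decision_tree.png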
code
105175580/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
train.describe()
code
105175580/cell_11
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import graphviz

train.dtypes
pred = clf.predict(test)
pred
predict = clf.predict_proba(test)
predict
x = train.drop('TARGET', axis=1)
y = train.TARGET
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
clf = tree.DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf = clf.fit(x, y)
clf = DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf.fit(x, y)
clf_feature_names = list(x.columns)
dot_data = tree.export_graphviz(clf, feature_names=clf_feature_names, class_names=['0', '1'],
                                filled=True, rounded=True, special_characters=True, out_file=None)
graph = graphviz.Source(dot_data)
graph
code
105175580/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
pred = clf.predict(test)
pred
predict = clf.predict_proba(test)
predict
code
105175580/cell_3
[ "text_plain_output_1.png" ]
train.info()
code
105175580/cell_10
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier

train.dtypes
pred = clf.predict(test)
pred
predict = clf.predict_proba(test)
predict
x = train.drop('TARGET', axis=1)
y = train.TARGET
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
clf = tree.DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf = clf.fit(x, y)
tree.plot_tree(clf)
code
105175580/cell_12
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import graphviz

train.dtypes
pred = clf.predict(test)
pred
predict = clf.predict_proba(test)
predict
x = train.drop('TARGET', axis=1)
y = train.TARGET
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
clf = tree.DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf = clf.fit(x, y)
clf = DecisionTreeClassifier(max_depth=2, random_state=0, splitter='best')
clf.fit(x, y)
clf_feature_names = list(x.columns)
dot_data = tree.export_graphviz(clf, feature_names=clf_feature_names, class_names=['0', '1'],
                                filled=True, rounded=True, special_characters=True, out_file=None)
graph = graphviz.Source(dot_data)
graph
clf = tree.DecisionTreeClassifier(max_depth=3, random_state=1, splitter='best', min_samples_leaf=2)
clf = clf.fit(x, y)
tree.plot_tree(clf)
code
105175580/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
train.dtypes
code
73078742/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
from sklearn import preprocessing
df.head()
code
73078742/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
code
73078742/cell_20
[ "image_output_1.png" ]
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.metrics import explained_variance_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
X, y = (df_le.iloc[:, :-1], df_le.iloc[:, -1])
estimator = SVR(kernel='linear')
selector = RFE(estimator, n_features_to_select=4)
selector = selector.fit(X, y)
selected_cols = [c for i, c in enumerate(X.columns) if selector.support_[i]]
X = df_le[selected_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = SVR(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
explained_variance_score(y_test, y_pred)
x_sorted = X_test.sort_values(by=['pretest'])
fig, ax = plt.subplots(figsize=(16, 7))
x_predictions = pd.Series(clf.predict(x_sorted))
y_actuals = pd.Series(y_test[x_sorted.index])
y_actuals.reset_index().posttest.plot(ax=ax, linewidth=4)
x_predictions.plot(ax=ax, linewidth=0.8)
ax.legend(['actual', 'predicted'])
X = df_le.drop(['test_diff', 'pretest', 'posttest'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = SVR(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
explained_variance_score(y_test, y_pred)
code
73078742/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
code
73078742/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
code
73078742/cell_11
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
df_le.head()
code
73078742/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
code
73078742/cell_15
[ "image_output_2.png", "image_output_1.png" ]
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
X, y = (df_le.iloc[:, :-1], df_le.iloc[:, -1])
estimator = SVR(kernel='linear')
selector = RFE(estimator, n_features_to_select=4)
selector = selector.fit(X, y)
selected_cols = [c for i, c in enumerate(X.columns) if selector.support_[i]]
print(f'Used recursive feature elimination to select the following columns for our training:\n\n{selected_cols}')
code
73078742/cell_16
[ "image_output_1.png" ]
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.metrics import explained_variance_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
X, y = (df_le.iloc[:, :-1], df_le.iloc[:, -1])
estimator = SVR(kernel='linear')
selector = RFE(estimator, n_features_to_select=4)
selector = selector.fit(X, y)
selected_cols = [c for i, c in enumerate(X.columns) if selector.support_[i]]
X = df_le[selected_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = SVR(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
explained_variance_score(y_test, y_pred)
code
73078742/cell_17
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.metrics import explained_variance_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
X, y = (df_le.iloc[:, :-1], df_le.iloc[:, -1])
estimator = SVR(kernel='linear')
selector = RFE(estimator, n_features_to_select=4)
selector = selector.fit(X, y)
selected_cols = [c for i, c in enumerate(X.columns) if selector.support_[i]]
X = df_le[selected_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = SVR(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
explained_variance_score(y_test, y_pred)
x_sorted = X_test.sort_values(by=['pretest'])
fig, ax = plt.subplots(figsize=(16, 7))
x_predictions = pd.Series(clf.predict(x_sorted))
y_actuals = pd.Series(y_test[x_sorted.index])
y_actuals.reset_index().posttest.plot(ax=ax, linewidth=4)
x_predictions.plot(ax=ax, linewidth=0.8)
ax.legend(['actual', 'predicted'])
code
73078742/cell_14
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
df_le
code
73078742/cell_12
[ "image_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
fig, axes = plt.subplots(figsize=(16, 7))
ax1 = df.school.value_counts()[::-1].plot(kind='bar', ax=axes)
ax1.set_title('School distribution')
fig, axes = plt.subplots(ncols=2, figsize=(16, 5))
ax2 = df.school_setting.value_counts().plot(kind='bar', ax=axes[0])
ax3 = df.school_type.value_counts().plot(kind='bar', ax=axes[1])
ax2.set_title('School setting')
ax3.set_title('School type')
plt.tight_layout()
fig, ax = plt.subplots(figsize=(15, 5))
df.groupby('school').pretest.mean().sort_values().plot(kind='bar', ax=ax)
ax.set_title('Comparison of average scores by school')
plt.show()
label_decoder = dict()
for col in ['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']:
    le = preprocessing.LabelEncoder()
    le.fit_transform(df[col])
    label_decoder[col] = le
df_le = df[['school', 'school_setting', 'school_type', 'classroom', 'teaching_method', 'gender', 'lunch']].apply(le.fit_transform, axis='index')
df_le[['n_student', 'pretest', 'posttest']] = df[['n_student', 'pretest', 'posttest']]
df_le.insert(8, 'test_diff', df_le.posttest - df_le.pretest)
x = df_le.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm.columns = df_le.columns
corr = df_norm.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    fig, ax = plt.subplots(figsize=(15, 10))
    sns.heatmap(corr, ax=ax, cmap=sns.color_palette('light:#e24a33', as_cmap=True),
                xticklabels=True, mask=mask, linewidths=0.5)
    ax.set_title('Heatmap showing correlation of variables')
plt.show()
code
73078742/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df
plt.style.use('ggplot')
fig, axes = plt.subplots(nrows=2)
ax1 = df.pretest.plot.kde(figsize=(16, 10), ax=axes[0])
ax1 = df.posttest.plot.kde(ax=axes[0])
ax1.legend(['Pretest', 'Posttest'])
ax1.set_title('Density plot of pre/post test scores')
ax2 = df.n_student.plot.kde(ax=axes[1])
ax2.set_title('Density plot of number of students in class')
plt.tight_layout()
plt.show()
print(f'The median moved {df.posttest.median() - df.pretest.median()} points from pretest to posttest')
code
18159704/cell_4
[ "text_plain_output_1.png" ]
from glob import glob
import cv2
import numpy as np

train_data1 = glob('../input/fruits-360_dataset/fruits-360/Training/Raspberry/*')
train_data2 = glob('../input/fruits-360_dataset/fruits-360/Training/Pomelo Sweetie/*')
test_data1 = glob('../input/fruits-360_dataset/fruits-360/Test/Raspberry/*')
test_data2 = glob('../input/fruits-360_dataset/fruits-360/Test/Pomelo Sweetie/*')
x_train = []
x_test = []

def create_data(data, values):
    for i in values:
        im = cv2.imread(i, 0)
        data.append(im)
    return len(values)

train_Rasberry_size = create_data(x_train, train_data1)
train_Pomelo_size = create_data(x_train, train_data2)
test_Rasberry_size = create_data(x_test, test_data1)
test_Pomelo_size = create_data(x_test, test_data2)
x_train = np.asarray(x_train, dtype=np.float64)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2])
z = np.zeros(train_Rasberry_size)
o = np.ones(train_Pomelo_size)
y_train = np.concatenate((z, o), axis=0)
x_test = np.asarray(x_test, dtype=np.float64)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1] * x_test.shape[2])
z = np.zeros(test_Rasberry_size)
o = np.ones(test_Pomelo_size)
y_test = np.concatenate((z, o), axis=0)
print('x_test:', x_test.shape)
print('y_test:', y_test.shape)
code
18159704/cell_2
[ "text_plain_output_1.png" ]
from glob import glob
import cv2

train_data1 = glob('../input/fruits-360_dataset/fruits-360/Training/Raspberry/*')
train_data2 = glob('../input/fruits-360_dataset/fruits-360/Training/Pomelo Sweetie/*')
test_data1 = glob('../input/fruits-360_dataset/fruits-360/Test/Raspberry/*')
test_data2 = glob('../input/fruits-360_dataset/fruits-360/Test/Pomelo Sweetie/*')
x_train = []
x_test = []

def create_data(data, values):
    for i in values:
        im = cv2.imread(i, 0)
        data.append(im)
    return len(values)

train_Rasberry_size = create_data(x_train, train_data1)
train_Pomelo_size = create_data(x_train, train_data2)
print('train_Raspberry_size:{} || train_Pomelo_size:{}'.format(train_Rasberry_size, train_Pomelo_size))
test_Rasberry_size = create_data(x_test, test_data1)
test_Pomelo_size = create_data(x_test, test_data2)
print('test_Raspberry_size:{} || test_Pomelo_size:{}'.format(test_Rasberry_size, test_Pomelo_size))
code
18159704/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import minmax_scale
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense
from glob import glob
code
18159704/cell_3
[ "text_plain_output_1.png" ]
from glob import glob
import cv2
import numpy as np

train_data1 = glob('../input/fruits-360_dataset/fruits-360/Training/Raspberry/*')
train_data2 = glob('../input/fruits-360_dataset/fruits-360/Training/Pomelo Sweetie/*')
test_data1 = glob('../input/fruits-360_dataset/fruits-360/Test/Raspberry/*')
test_data2 = glob('../input/fruits-360_dataset/fruits-360/Test/Pomelo Sweetie/*')
x_train = []
x_test = []

def create_data(data, values):
    for i in values:
        im = cv2.imread(i, 0)
        data.append(im)
    return len(values)

train_Rasberry_size = create_data(x_train, train_data1)
train_Pomelo_size = create_data(x_train, train_data2)
test_Rasberry_size = create_data(x_test, test_data1)
test_Pomelo_size = create_data(x_test, test_data2)
x_train = np.asarray(x_train, dtype=np.float64)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2])
z = np.zeros(train_Rasberry_size)
o = np.ones(train_Pomelo_size)
y_train = np.concatenate((z, o), axis=0)
print('x_train:', x_train.shape)
print('y_train:', y_train.shape)
code
18159704/cell_5
[ "text_plain_output_1.png" ]
from glob import glob
from keras.layers import Dense
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
import cv2
import numpy as np

train_data1 = glob('../input/fruits-360_dataset/fruits-360/Training/Raspberry/*')
train_data2 = glob('../input/fruits-360_dataset/fruits-360/Training/Pomelo Sweetie/*')
test_data1 = glob('../input/fruits-360_dataset/fruits-360/Test/Raspberry/*')
test_data2 = glob('../input/fruits-360_dataset/fruits-360/Test/Pomelo Sweetie/*')
x_train = []
x_test = []

def create_data(data, values):
    for i in values:
        im = cv2.imread(i, 0)
        data.append(im)
    return len(values)

train_Rasberry_size = create_data(x_train, train_data1)
train_Pomelo_size = create_data(x_train, train_data2)
test_Rasberry_size = create_data(x_test, test_data1)
test_Pomelo_size = create_data(x_test, test_data2)
x_train = np.asarray(x_train, dtype=np.float64)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2])
z = np.zeros(train_Rasberry_size)
o = np.ones(train_Pomelo_size)
y_train = np.concatenate((z, o), axis=0)

def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units=16, kernel_initializer='uniform', activation='relu', input_dim=x_train.shape[1]))
    classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier

model = KerasClassifier(build_fn=build_classifier, epochs=5, verbose=2)
results = cross_val_score(estimator=model, X=x_train, y=y_train, cv=3)
print('Accuracy mean:', results.mean())
print('Accuracy standard deviation:', results.std())  # fixed label: `.std()` is a standard deviation, not a variance
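# --- Added example (not in the original cell): the notebook imports
# `minmax_scale` elsewhere but never applies it; scaling the 0-255 pixel values
# into [0, 1] before cross-validation is a common refinement. A hedged sketch,
# reusing `model`, `x_train`, and `y_train` from above:
from sklearn.preprocessing import minmax_scale

x_train_scaled = minmax_scale(x_train)  # column-wise min-max scaling to [0, 1]
results_scaled = cross_val_score(estimator=model, X=x_train_scaled, y=y_train, cv=3)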
code
18135845/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

tsplot(AMZN.AdjClose, lags=30)
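# --- Added example (not in the original cell): a formal complement to reading
# the ACF by eye is the augmented Dickey-Fuller unit-root test. A minimal
# sketch, assuming the AMZN series loaded above:
from statsmodels.tsa.stattools import adfuller

adf_stat, p_value = adfuller(AMZN.AdjClose)[:2]
print(f'ADF statistic: {adf_stat:.3f}, p-value: {p_value:.3f}')  # a large p-value suggests non-stationarity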
code
18135845/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
# fixed: the original called an undefined `p(...)`; `print` was clearly intended
print('Outputs\n-------------\nMean: {:.3f}\nVariance: {:.3f}\nStandard Deviation: {:.3f}'.format(
    whnoise.mean(), whnoise.var(), whnoise.std()))
code
18135845/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
np.random.seed(1)
n_samples = 1000
x = w = np.random.normal(size=n_samples)
for t in range(n_samples):
    x[t] = x[t - 1] + w[t]
tsplot(np.diff(AMZN.AdjClose), lags=30)
code
18135845/cell_11
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
np.random.seed(1)
n_samples = 1000
x = w = np.random.normal(size=n_samples)
for t in range(n_samples):
    x[t] = x[t - 1] + w[t]
tsplot(x, lags=30)
code
18135845/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
np.random.seed(1)
n_samples = 1000
x = w = np.random.normal(size=n_samples)
for t in range(n_samples):
    x[t] = x[t - 1] + w[t]
tsplot(np.diff(MSFT.AdjClose), lags=30)
code
18135845/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
tsplot(whnoise, lags=30)
code
18135845/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

tsplot(MSFT.AdjClose, lags=30)
code
18135845/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.tsa.api as smt

MSFT = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN = pd.read_csv('../input/Amazon.csv', header=None)
MSFT['AdjClose'] = pd.read_csv('../input/Microsoft.csv', header=None)
AMZN['AdjClose'] = pd.read_csv('../input/Amazon.csv', header=None)

def tsplot(y, lags=None, figsize=(15, 15), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.7)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.7)
        plt.tight_layout()
    return

np.random.seed(1)
whnoise = np.random.normal(size=1000)
np.random.seed(1)
n_samples = 1000
x = w = np.random.normal(size=n_samples)
for t in range(n_samples):
    x[t] = x[t - 1] + w[t]
tsplot(np.diff(x), lags=30)
code
17122172/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from gensim.models import KeyedVectors, Word2Vec
from matplotlib import pyplot
from nltk.tokenize import RegexpTokenizer
from sklearn.decomposition import PCA
import pandas as pd

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')['Message'].astype('str')
tokenizer = RegexpTokenizer('\\w+')
data_tokenized = [w.lower() for w in forum_posts.tolist()]
data_tokenized = [tokenizer.tokenize(i) for i in data_tokenized]
model_2 = Word2Vec(size=300, min_count=1)
model_2.build_vocab(data_tokenized)
total_examples = model_2.corpus_count
model_2.train(data_tokenized, total_examples=total_examples, epochs=1)
X = model_2[list(model_2.wv.vocab.keys())[:100]]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model_2.wv.vocab.keys())[:100]
for i, word in enumerate(words):
    pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
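# --- Added example (not in the original cell): once trained, the embedding can
# be queried for nearest neighbours in vector space. A minimal sketch, assuming
# `model_2` from above and the gensim 3.x API (matching `size=` / `wv.vocab`);
# the query word 'kaggle' is illustrative and must exist in the vocabulary.
print(model_2.wv.most_similar('kaggle', topn=5))  # 5 closest tokens by cosine similarity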
code
17122172/cell_5
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors, Word2Vec
from nltk.tokenize import RegexpTokenizer
import pandas as pd

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')['Message'].astype('str')
tokenizer = RegexpTokenizer('\\w+')
data_tokenized = [w.lower() for w in forum_posts.tolist()]
data_tokenized = [tokenizer.tokenize(i) for i in data_tokenized]
model_2 = Word2Vec(size=300, min_count=1)
model_2.build_vocab(data_tokenized)
total_examples = model_2.corpus_count
model_2.train(data_tokenized, total_examples=total_examples, epochs=1)
code
72106102/cell_13
[ "text_plain_output_1.png" ]
missing_values = X_train_full.isnull().sum()
cat_cols = [col for col in X_train_full.columns
            if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() < 10]
print('Number of unique categories for each categorical feature')
for cols in cat_cols:
    print(f'{cols}: {X_train_full[cols].nunique()}')
code
72106102/cell_9
[ "text_plain_output_1.png" ]
X_train_full.head()
code
72106102/cell_11
[ "text_html_output_1.png" ]
print(f'Shape of training data: {X_train_full.shape}')
missing_values = X_train_full.isnull().sum()
print(missing_values[missing_values > 0])
code
72106102/cell_16
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id')
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id')
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
X_full.dropna(axis=0, subset=['target'], inplace=True)
y = X_full['target']
X_full.drop(['target'], axis=1, inplace=True)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2)
missing_values = X_train_full.isnull().sum()
cat_cols = [col for col in X_train_full.columns
            if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() < 10]
cat_transformer = OneHotEncoder(handle_unknown='ignore')
preprocessor = ColumnTransformer(transformers=[('cat', cat_transformer, cat_cols)])
model = XGBRegressor()
clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
clf.fit(X_full, y)  # note: fitting on X_full means the validation rows are also seen in training
prediction = clf.predict(X_valid_full)
score = mean_absolute_error(prediction, y_valid)
print(f'Loss:{score}')
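# --- Added note (not in the original cell): ColumnTransformer drops every
# column it is not told about (remainder='drop' by default), so the numeric
# features above never reach the XGBRegressor. A hedged sketch of one way to
# keep them:
preprocessor_all = ColumnTransformer(
    transformers=[('cat', OneHotEncoder(handle_unknown='ignore'), cat_cols)],
    remainder='passthrough')  # pass the remaining (numeric) columns through unchanged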
code
72106102/cell_10
[ "text_html_output_1.png" ]
X_train_full.describe()
code
128016720/cell_30
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
classes = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'A-boot']
samples_per_class = 10
num_classes = len(classes)
fig, axs = plt.subplots(samples_per_class, num_classes, figsize=(10, 10))
fig.subplots_adjust(hspace=0.5)
for i in range(samples_per_class):
    for j in range(num_classes):
        cls = classes[j]
        ind = random.choice((y_train == j).nonzero()[0])
        img = X_train[ind].reshape((28, 28))
        axs[i, j].imshow(img, cmap=plt.cm.gray)
        axs[i, j].axis('off')
        if i == 0:
            axs[i, j].set_title(cls)
plt.show()
pipe = make_pipeline(StandardScaler(), KNeighborsClassifier())
param_grid = {'kneighborsclassifier__n_neighbors': range(1, 30)}
cv = GridSearchCV(pipe, param_grid=param_grid, cv=5)
cv.fit(X_train, y_train)
mean_scores = cv.cv_results_['mean_test_score']
sns.lineplot(x=range(1, 30), y=mean_scores)
plt.xlabel('K value')
plt.ylabel('Mean test score')
plt.show()
code
128016720/cell_26
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), KNeighborsClassifier())
param_grid = {'kneighborsclassifier__n_neighbors': range(1, 30)}
cv = GridSearchCV(pipe, param_grid=param_grid, cv=5)
cv.fit(X_train, y_train)
code
128016720/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
data['label'].sort_values()
code
128016720/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128016720/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
classes = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'A-boot']
samples_per_class = 10
num_classes = len(classes)
fig, axs = plt.subplots(samples_per_class, num_classes, figsize=(10, 10))
fig.subplots_adjust(hspace=0.5)
for i in range(samples_per_class):
    for j in range(num_classes):
        cls = classes[j]
        ind = random.choice((y_train == j).nonzero()[0])
        img = X_train[ind].reshape((28, 28))
        axs[i, j].imshow(img, cmap=plt.cm.gray)
        axs[i, j].axis('off')
        if i == 0:
            axs[i, j].set_title(cls)
plt.show()
pipe = make_pipeline(StandardScaler(), KNeighborsClassifier())
param_grid = {'kneighborsclassifier__n_neighbors': range(1, 30)}
cv = GridSearchCV(pipe, param_grid=param_grid, cv=5)
cv.fit(X_train, y_train)
mean_scores = cv.cv_results_['mean_test_score']
y_pred = cv.predict(X_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
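# --- Added example (not in the original cell): the raw confusion matrix is
# easier to read as a heatmap; seaborn, matplotlib, and the `classes` list are
# already available above. A small sketch:
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d', xticklabels=classes, yticklabels=classes)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()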
code
128016720/cell_28
[ "image_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), KNeighborsClassifier())
param_grid = {'kneighborsclassifier__n_neighbors': range(1, 30)}
cv = GridSearchCV(pipe, param_grid=param_grid, cv=5)
cv.fit(X_train, y_train)
print('Best hyperparameters: ', cv.best_params_)
code
128016720/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
plt.figure(figsize=(8, 6))
sns.heatmap(data.corr(), cmap='coolwarm')
code
128016720/cell_15
[ "text_plain_output_1.png" ]
print(f'Training data shape: {X_train.shape}')
print(f'Training labels shape: {y_train.shape}')
print(f'Test data shape: {X_test.shape}')
print(f'Test labels shape: {y_test.shape}')
code
128016720/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
classes = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'A-boot']
samples_per_class = 10
num_classes = len(classes)
fig, axs = plt.subplots(samples_per_class, num_classes, figsize=(10, 10))
fig.subplots_adjust(hspace=0.5)
for i in range(samples_per_class):
    for j in range(num_classes):
        cls = classes[j]
        ind = random.choice((y_train == j).nonzero()[0])
        img = X_train[ind].reshape((28, 28))
        axs[i, j].imshow(img, cmap=plt.cm.gray)
        axs[i, j].axis('off')
        if i == 0:
            axs[i, j].set_title(cls)
plt.show()
code
128016720/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
if data.isnull().values.any():
    print('There is something missing in the data')
else:
    print('Data is complete')
code
128016720/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/fashionmnist/fashion-mnist_train.csv')
data.head()
code
50244608/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
employees.describe()
code
50244608/cell_9
[ "image_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
code
50244608/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees['Attrition'].value_counts()
code
50244608/cell_4
[ "text_plain_output_1.png" ]
!pip install -U plotly  # assumed notebook shell escape; the original line lacked the leading '!'
code
50244608/cell_34
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
correlations = employees.corr()
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(correlations, annot=True)
code
50244608/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
# Assumption (not in the extracted cell): the raw 'Attrition' column holds 'Yes'/'No',
# so it is mapped to 1/0 here; otherwise the == 1 / == 0 filters below match nothing.
employees['Attrition'] = employees['Attrition'].apply(lambda x: 1 if x == 'Yes' else 0)
left = employees[employees['Attrition'] == 1]
stayed = employees[employees['Attrition'] == 0]
left.describe()
code
50244608/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
from sklearn.ensemble import RandomForestClassifier
import tensorflow as tf
import pickle
code
50244608/cell_40
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
# Assumption (not in the extracted cell): the raw 'Attrition' column holds 'Yes'/'No',
# so it is mapped to 1/0 here; otherwise the == 1 / == 0 filters below match nothing.
employees['Attrition'] = employees['Attrition'].apply(lambda x: 1 if x == 'Yes' else 0)
left = employees[employees['Attrition'] == 1]
stayed = employees[employees['Attrition'] == 0]
correlations = employees.corr()
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(correlations, annot=True)
sns.set_palette('seismic_r')
pd.options.plotting.backend = 'plotly'
plt.figure(figsize=(12, 7))
sns.kdeplot(left['TotalWorkingYears'], label='Employees that left', shade=True, color='red')
sns.kdeplot(stayed['TotalWorkingYears'], label='Employees that stayed', shade=True, color='c')
code
50244608/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
# Assumption (not in the extracted cell): map 'Yes'/'No' to 1/0 so the filters below select rows.
employees['Attrition'] = employees['Attrition'].apply(lambda x: 1 if x == 'Yes' else 0)
left = employees[employees['Attrition'] == 1]
stayed = employees[employees['Attrition'] == 0]
print('Total = ', len(employees))
print('Number of employees that left the company = ', len(left))
print('% of employees that left the company = ', len(left) / len(employees) * 100)
print('Employees that stayed in the company = ', len(stayed))
print('% of employees that stayed in the company = ', len(stayed) / len(employees) * 100)
code
50244608/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
code
50244608/cell_7
[ "image_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.head()
code
50244608/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
employees['EducationField'].unique()
code
50244608/cell_8
[ "image_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.info()
code
50244608/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
employees['Attrition'].unique()
code
50244608/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
employees['OverTime'].unique()
code
50244608/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
correlations = employees.corr()
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(correlations, annot=True)
sns.set_palette('seismic_r')
plt.figure(figsize=[20, 20])
plt.subplot(411)
sns.countplot(x='JobRole', hue='Attrition', data=employees)
plt.subplot(412)
sns.countplot(x='MaritalStatus', hue='Attrition', data=employees)
plt.subplot(413)
sns.countplot(x='JobInvolvement', hue='Attrition', data=employees)
plt.subplot(414)
sns.countplot(x='JobLevel', hue='Attrition', data=employees)
code
50244608/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
employees['Over18'].unique()
code
50244608/cell_43
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
# Assumption (not in the extracted cell): the raw 'Attrition' column holds 'Yes'/'No',
# so it is mapped to 1/0 here; otherwise the == 1 / == 0 filters below match nothing.
employees['Attrition'] = employees['Attrition'].apply(lambda x: 1 if x == 'Yes' else 0)
left = employees[employees['Attrition'] == 1]
stayed = employees[employees['Attrition'] == 0]
correlations = employees.corr()
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(correlations, annot=True)
sns.set_palette('seismic_r')
pd.options.plotting.backend = 'plotly'
plt.figure(figsize=(12, 7))
sns.kdeplot(left['DistanceFromHome'], label='Employees that left', shade=True, color='k')
sns.kdeplot(stayed['DistanceFromHome'], label='Employees that stayed', shade=True, color='b')
code