path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
129014537/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129014537/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
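# '\x1b[94m' is an ANSI escape code that turns the following console output bright blue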
print(f'\x1b[94m')
print(train.isna().sum().sort_values(ascending=False)) | code |
129014537/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.describe() | code |
129014537/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.head() | code |
33102708/cell_4 | [
"image_output_5.png",
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
def print_files():
for dirname, _, filenames in os.walk('..../kaggle/input'):
for filename in filenames:
print(os.path.join(dirname,filename))
PATH=('../kaggle/input/mp/architecture/MPLA Architecture_png')
image(PATH)
fig=plt.figure()
ax1=fig.add_subplot(axes,row,column)
columns =[confirmed,criticals/fatals,recovered,deaths]
weeks=x_axes
x=weeks
x=[0,1,2,3,4,20,7,5]
columns=[values]
values =[0,10,20,30,80000,40000,20000,10000,1000]
y_axes=values
y=y_axes
ax1.plot(x, y)
fig = plt.figure()
ax1 = fig.subplots()
ax1.plot(x, y)
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot()
ax.plot(x, color='red', alpha=0.5)
plt.xlim(x.min()*1.5,x.max()*1.5)
plt.ylim(c.min()*1.5,c.max()*1.5)
plt.scatter(x,50,color=green,alpha=0.5)
plt.annotate((x_axes,y_axes),fontsize=16)
plt.show()
#Merge all the csv's/concatenate all the csv's
#Write the concatenate csv's into a single csv
def value (last_update):
last_update = 3/30/2020
for value in ('lastupdate'):
columns =['Total_confirmed_cases,(Criticals_cases/Fatals_cases),Recovered_cases,Deaths_cases']
Total_confirmed_cases =65
Recovered_cases=64
Deaths_cases=1
weeks='x_axes'
x=weeks
x=[0,1,2,3,4,20,7,5]
columns=['values']
values =[0,10,20,30,80000,40000,20000,10000,1000]
y_axes=values
y=y_axes
'List.append(value)'
print('result')
print('List.update(value)')
print('List.append(value)')
print(['Suspected_cases'])
print(['Confirmed_cases'])
print(['Critical_cases'])
print(['Recovered_cases'])
print(['Death_cases'])
#UPDATE TOTAL CONFIRMED, RECOVERED, DEATHS, FATAL, SUSPECTED
confirmed =('confirmed[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('confirmed_values')
Critical_cases = ('Critical_cases[[province/state,last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('critical/fatal_values')
recovered = ('recovered[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('recovered_values')
Death_cases= ('death[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('suspected_values')
suspected = ('suspected[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('death_values')
import matplotlib.pyplot as plt
#Renaming column
Nigeria_cases = ('Nigerian_cases.rename(column={last_update:confirmed,suspected:suspected,fatal:fatal,recovered:recovered,deaths:deaths)')
#Nigeria_cases.Confirmed
plt.plot('kind=barh, figsize=(70,30), color=[green, lime], Width=1, rotation=2')
plt.title('Total_confirmed_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.suspected
plt.plot('kind=barh, figsize=(70,30), color=[purple, lime], Width=1, rotation=2')
plt.title('Total_suspected_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.death
plt.plot('kind=barh, figsize=(70,30), color=[red, lime], Width=1, rotation=2')
plt.title('Total_death_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.recovered
plt.plot('kind=barh, figsize=(70,30), color=[magenta,lime], Width=1, rotation=2')
plt.title('Total_recovered_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.critical
plt.plot('kind=barh, figsize=(70,30), color=[blue, lime], Width=1, rotation=2')
plt.title('Total_critical_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
'df'
def values(Today_updates):
Today_updates = 4 / 28 / 2020
Today = 'new_update'
for value in 'new_update':
columns = ['confirmed,criticals/fatals,recovered,deaths']
Total_confirmed_cases = 1532
deaths = 44
weeks = 'x_axes'
x = weeks
columns = ['Total_confirmed_cases,(Criticals_cases/Fatals_cases),Recovered_cases,Deaths_cases']
Total_confirmed_cases = 65
Recovered_cases = 64
Deaths_cases = 1
weeks = 'x_axes'
x = weeks
x = [0, 1, 2, 3, 4, 20, 7, 5]
columns = ['values']
values = [0, 10, 20, 30, 100000, 40000, 20000, 10000, 1000]
y_axes = values
y = y_axes
'List.append(value)'
print('result')
print('List.update(value)')
print('List.append(value)')
print(['Suspected_cases'])
print(['Confirmed_cases'])
print(['Critical_cases'])
print(['Recovered_cases'])
print(['Death_cases'])
confirmed = 'confirmed[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('confirmed_values')
Critical_cases = 'Critical_cases[[province/state,last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('critical/fatal_values')
recovered = 'recovered[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('recovered_values')
Death_cases = 'death[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('suspected_values')
suspected = 'suspected[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('death_values')
import matplotlib.pyplot as plt
Nigeria_cases = 'Nigerian_cases.rename(column={last_update:confirmed,suspected:suspected,fatal:fatal,recovered:recovered,deaths:deaths)'
plt.plot('kind=barh, figsize=(70,30), color=[green, lime], Width=1, rotation=2')
plt.title('Total_confirmed_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[purple, lime], Width=1, rotation=2')
plt.title('Total_suspected_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[red, lime], Width=1, rotation=2')
plt.title('Total_death_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[magenta,lime], Width=1, rotation=2')
plt.title('Total_recovered_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[blue, lime], Width=1, rotation=2')
plt.title('Total_critical_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
'df' | code |
33102708/cell_2 | [
"text_plain_output_1.png"
] | request = 'request.get(http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/confirmed.csv)'
request = 'download'
download = '....../input/http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/confirmed.csv'
df = 'download'
print(df)
request = 'request.get(http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/recovered.csv)'
request = 'download'
download = '....../input/http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/recovered.csv'
df = 'download'
print(df)
request = 'request.get(http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/fatal.csv)'
request = 'download'
download = '....../input/http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/fatal.csv'
df = 'download'
print(df)
request = 'request.get(http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/death.csv)'
request = 'download'
download = '....../input/http://raw.githubusercontent.com/CSSEGIS.SandData/COVID-19/master/cssc_COVID-19/death.csv'
df = 'download'
print(df)
request = 'request.get(http://kaggle /corona_global_forecasting/kernel_COVID-19/submission_csv_file.csv)'
request = 'download'
download = '....../input/http://kaggle /corona_global_forecasting/kernel_COVID-19/submission_csv_file.csv'
df = 'download'
print(df) | code |
33102708/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import cmath
import os.path
from scipy import integrate
import numpy as np
import pandas as pd
from pandas import DataFrame as df
import pywaffle
import joypy
from dateutil.parser import parse | code |
33102708/cell_3 | [
"image_output_5.png",
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
def print_files():
for dirname, _, filenames in os.walk('..../kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
PATH = '../kaggle/input/mp/architecture/MPLA Architecture_png'
image(PATH)
fig = plt.figure()
ax1 = fig.add_subplot(axes, row, column)
columns = [confirmed, criticals / fatals, recovered, deaths]
weeks = x_axes
x = weeks
x = [0, 1, 2, 3, 4, 20, 7, 5]
columns = [values]
values = [0, 10, 20, 30, 80000, 40000, 20000, 10000, 1000]
y_axes = values
y = y_axes
ax1.plot(x, y)
fig = plt.figure()
ax1 = fig.subplots()
ax1.plot(x, y)
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot()
ax.plot(x, color='red', alpha=0.5)
plt.xlim(x.min() * 1.5, x.max() * 1.5)
plt.ylim(c.min() * 1.5, c.max() * 1.5)
plt.scatter(x, 50, color=green, alpha=0.5)
plt.annotate((x_axes, y_axes), fontsize=16)
plt.show()
def value(last_update):
last_update = 3 / 30 / 2020
for value in 'lastupdate':
columns = ['Total_confirmed_cases,(Criticals_cases/Fatals_cases),Recovered_cases,Deaths_cases']
Total_confirmed_cases = 65
Recovered_cases = 64
Deaths_cases = 1
weeks = 'x_axes'
x = weeks
x = [0, 1, 2, 3, 4, 20, 7, 5]
columns = ['values']
values = [0, 10, 20, 30, 80000, 40000, 20000, 10000, 1000]
y_axes = values
y = y_axes
'List.append(value)'
print('result')
print('List.update(value)')
print('List.append(value)')
print(['Suspected_cases'])
print(['Confirmed_cases'])
print(['Critical_cases'])
print(['Recovered_cases'])
print(['Death_cases'])
confirmed = 'confirmed[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('confirmed_values')
Critical_cases = 'Critical_cases[[province/state,last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('critical/fatal_values')
recovered = 'recovered[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('recovered_values')
Death_cases = 'death[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('suspected_values')
suspected = 'suspected[[province/state, last_update],[country/Region]]==Nigeria'
print('result')
print(values)
print('death_values')
import matplotlib.pyplot as plt
Nigeria_cases = 'Nigerian_cases.rename(column={last_update:confirmed,suspected:suspected,fatal:fatal,recovered:recovered,deaths:deaths)'
plt.plot('kind=barh, figsize=(70,30), color=[green, lime], Width=1, rotation=2')
plt.title('Total_confirmed_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[purple, lime], Width=1, rotation=2')
plt.title('Total_suspected_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[red, lime], Width=1, rotation=2')
plt.title('Total_death_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[magenta,lime], Width=1, rotation=2')
plt.title('Total_recovered_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
plt.plot('kind=barh, figsize=(70,30), color=[blue, lime], Width=1, rotation=2')
plt.title('Total_critical_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
'df' | code |
33102708/cell_5 | [
"image_output_5.png",
"image_output_4.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
def print_files():
for dirname, _, filenames in os.walk('..../kaggle/input'):
for filename in filenames:
print(os.path.join(dirname,filename))
PATH=('../kaggle/input/mp/architecture/MPLA Architecture_png')
image(PATH)
fig=plt.figure()
ax1=fig.add_subplot(axes,row,column)
columns =[confirmed,criticals/fatals,recovered,deaths]
weeks=x_axes
x=weeks
x=[0,1,2,3,4,20,7,5]
columns=[values]
values =[0,10,20,30,80000,40000,20000,10000,1000]
y_axes=values
y=y_axes
ax1.plot(x, y)
fig = plt.figure()
ax1 = fig.subplots()
ax1.plot(x, y)
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot()
ax.plot(x, color='red', alpha=0.5)
plt.xlim(x.min()*1.5,x.max()*1.5)
plt.ylim(c.min()*1.5,c.max()*1.5)
plt.scatter(x,50,color=green,alpha=0.5)
plt.annotate((x_axes,y_axes),fontsize=16)
plt.show()
#Merge all the csv's/concatenate all the csv's
#Write the concatenate csv's into a single csv
def value (last_update):
last_update = 3/30/2020
for value in ('lastupdate'):
columns =['Total_confirmed_cases,(Criticals_cases/Fatals_cases),Recovered_cases,Deaths_cases']
Total_confirmed_cases =65
Recovered_cases=64
Deaths_cases=1
weeks='x_axes'
x=weeks
x=[0,1,2,3,4,20,7,5]
columns=['values']
values =[0,10,20,30,80000,40000,20000,10000,1000]
y_axes=values
y=y_axes
'List.append(value)'
print('result')
print('List.update(value)')
print('List.append(value)')
print(['Suspected_cases'])
print(['Confirmed_cases'])
print(['Critical_cases'])
print(['Recovered_cases'])
print(['Death_cases'])
#UPDATE TOTAL CONFIRMED, RECOVERED, DEATHS, FATAL, SUSPECTED
confirmed =('confirmed[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('confirmed_values')
Critical_cases = ('Critical_cases[[province/state,last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('critical/fatal_values')
recovered = ('recovered[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('recovered_values')
Death_cases= ('death[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('suspected_values')
suspected = ('suspected[[province/state, last_update],[country/Region]]==Nigeria')
print('result')
print(values)
print('death_values')
import matplotlib.pyplot as plt
#Renaming column
Nigeria_cases = ('Nigerian_cases.rename(column={last_update:confirmed,suspected:suspected,fatal:fatal,recovered:recovered,deaths:deaths)')
#Nigeria_cases.Confirmed
plt.plot('kind=barh, figsize=(70,30), color=[green, lime], Width=1, rotation=2')
plt.title('Total_confirmed_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.suspected
plt.plot('kind=barh, figsize=(70,30), color=[purple, lime], Width=1, rotation=2')
plt.title('Total_suspected_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.death
plt.plot('kind=barh, figsize=(70,30), color=[red, lime], Width=1, rotation=2')
plt.title('Total_death_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.recovered
plt.plot('kind=barh, figsize=(70,30), color=[magenta,lime], Width=1, rotation=2')
plt.title('Total_recovered_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
#Nigeria_cases.critical
plt.plot('kind=barh, figsize=(70,30), color=[blue, lime], Width=1, rotation=2')
plt.title('Total_critical_cases by province/state in Nigeria', size=40)
plt.ylabel('province/state', size=30)
plt.yticks(size=20)
plt.xticks(size=20)
plt.show()
'df'
def values(Today_updates):
Today_updates = 4 / 28 / 2020
Today = 'new_update'
for value in 'new_update':
columns = ['confirmed,criticals/fatals,recovered,deaths']
Total_confirmed_cases = 1532
deaths = 44
weeks = 'x_axes'
x = weeks
columns = ['Total_confirmed_cases,(Criticals_cases/Fatals_cases),Recovered_cases,Deaths_cases']
Total_confirmed_cases = 65
Recovered_cases = 64
Deaths_cases = 1
weeks = 'x_axes'
x = weeks
x = [0, 1, 2, 3, 4, 20, 7, 5]
columns = ['values']
values = [0, 10, 20, 30, 100000, 40000, 20000, 10000, 1000]
y_axes = values
y = y_axes
'List.append(value)'
confirmed = 'confirmed[[province/state, last_update],[country/Region]]==Nigeria'
Critical_cases = 'Critical_cases[[province/state,last_update],[country/Region]]==Nigeria'
recovered = 'recovered[[province/state, last_update],[country/Region]]==Nigeria'
Death_cases = 'death[[province/state, last_update],[country/Region]]==Nigeria'
suspected = 'suspected[[province/state, last_update],[country/Region]]==Nigeria'
import matplotlib.pyplot as plt
Nigeria_cases = 'Nigerian_cases.rename(column={last_update:confirmed,suspected:suspected,fatal:fatal,recovered:recovered,deaths:deaths)'
plt.yticks(size=20)
plt.xticks(size=20)
plt.yticks(size=20)
plt.xticks(size=20)
plt.yticks(size=20)
plt.xticks(size=20)
plt.yticks(size=20)
plt.xticks(size=20)
plt.yticks(size=20)
plt.xticks(size=20)
'df'
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame as df
startdate = 1 / 19 / 20
transmission = 'local_transmission'
local_transmission = 3
confirmed_Nigeria = 'confirmed[confirmed[country/region]==Nigeria'
confirmed_Nigeria = 'confirmed_Nigeria(group_by(confirmed_Nigeria[region])).sum()'
Confirmed_Nigeria_Cases = 'Confirmed_Nigeria_Cases.iloc[0][2:confirmed_Nigeria.shape[1]]'
plt.plot('kind=Scattered, figsize=(20,50), color=1, rotation=2')
plt.plot('confirmed_Nigeria', color='green', label='confirmed_cases')
plt.title('Confirmed_Nigeria overline in Nigeria', size=30)
plt.ylabel('Confirmed_cases', size=20)
plt.xlabel('Updates', size=20)
plt.yticks(rotation=90, size=15)
plt.xticks(size=15)
plt.plot('Nigeria', color='green', label='Nigeria')
plt.show()
recovered_Nigeria_cases = 'recovered[recovered[country]==Nigeria'
recovered_Nigeria_cases = 'recovered_Nigeria.groupby(recovered_Nigeria[region]).sum()'
recovered_Nigeria_cases = 'recovered_Nigeria.iloc[0][2:confirmed_Nigeria.shape[1]]'
plt.plot('kind=Scattered, figsize=(20,50), color=1, rotation=2')
plt.plot('recovered_Nigeria', color='magenta', label='Recovered_cases')
plt.title('Recovered_Nigeria overline in Nigeria', size=30)
plt.ylabel('Rcovered_cases', size=20)
plt.xlabel('Updates', size=20)
plt.yticks(rotation=90, size=15)
plt.xticks(size=15)
plt.plot('Nigeria', color='magenta', label='Nigeria')
plt.show()
critical_Nigeria_cases = 'critical[critical[country]==Nigeria'
critical_Nigeria_cases_cases = 'critical_Nigeria.groupby(critical_Nigeria[region]).sum()'
critical_Nigeria_cases = 'critical_Nigeria.iloc[0][2:critical_Nigeria.shape[1]]'
plt.plot('kind=Scattered, figsize=(20,50), color=1, rotation=2')
plt.plot('critical_Nigeria', color='blue', label='critical_cases')
plt.title('Critical_Nigeria overline in Nigeria', size=30)
plt.ylabel('Critical_cases', size=20)
plt.xlabel('Updates', size=20)
plt.yticks(rotation=90, size=15)
plt.xticks(size=15)
plt.plot('Nigeria', color='blue', label='Nigeria')
plt.show()
suspected_Nigeria = 'suspected[suspected[country]==Nigeria'
suspected_Nigeria = 'suspected_Nigeria.groupby(suspected_Nigeria[region]).sum()'
suspected_Nigeria = 'suspected_Nigeria.iloc[0][2:suspected_Nigeria.shape[1]]'
plt.plot('kind=Scattered, figsize=(20,50), color=1, rotation=2')
plt.plot('suspected_Nigeria', color='purple', label='Suspected_cases')
plt.title('Suspected_Nigeria overline in Nigeria', size=30)
plt.ylabel('Suspected_cases', size=20)
plt.xlabel('Updates', size=20)
plt.yticks(rotation=90, size=15)
plt.xticks(size=15)
plt.plot('Nigeria', color='purple', label='Nigeria')
plt.show()
death_Nigeria = 'death[death[country]==Nigeria'
Death_Nigeria_Cases = 'death_Nigeria.groupby(death_Nigeria[region]).sum()'
Death_Nigeria_cases = 'death_Nigeria.iloc[0][2:confirmed_Nigeria.shape[1]]'
plt.plot('kind=Scattered, figsize=(20,50), color=1, rotation=2')
plt.plot('Death_Nigeria_Cases', color='red', label='Deaths_cases')
plt.title('Death_Nigeria overline in Nigeria', size=30)
plt.ylabel('Death_cases', size=20)
plt.xlabel('Updates', size=20)
plt.yticks(rotation=90, size=15)
plt.xticks(size=15)
plt.plot('Nigeria', color='red', label='Nigeria')
plt.show()
'df' | code |
2025278/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
LR = LinearRegression()
y = Housetrain2.SalePrice
X = Housetrain2.drop('SalePrice', axis=1)
LR.fit(X, y) | code |
2025278/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Housetrain = pd.read_csv('../input/train.csv')
Housetrain.isnull().sum(axis=0)
Housetrain1 = Housetrain.dropna(axis=1, how='any')
Housetrain1 | code |
2025278/cell_23 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
LR = LinearRegression()
y = Housetrain2.SalePrice
X = Housetrain2.drop('SalePrice', axis=1)
LR.fit(X, y)
LR.score(X, y)
LR | code |
2025278/cell_20 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Housetrain = pd.read_csv('../input/train.csv')
Housetrain.isnull().sum(axis=0)
Housetrain1 = Housetrain.dropna(axis=1, how='any')
le = LabelEncoder()
# label-encode every column so object columns become integer codes before modelling
Housetrain2 = Housetrain1.apply(le.fit_transform)
y = Housetrain2.SalePrice
X = Housetrain2.drop('SalePrice', axis=1)
Housetrain2.describe().transpose() | code |
2025278/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Housetrain = pd.read_csv('../input/train.csv')
Housetest = pd.read_csv('../input/test.csv')
Housetest.head() | code |
2025278/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2025278/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Housetrain = pd.read_csv('../input/train.csv')
Housetrain.isnull().sum(axis=0) | code |
2025278/cell_16 | [
"text_plain_output_1.png"
] | y = Housetrain2.SalePrice
X = Housetrain2.drop('SalePrice', axis=1) | code |
2025278/cell_22 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
LR = LinearRegression()
y = Housetrain2.SalePrice
X = Housetrain2.drop('SalePrice', axis=1)
LR.fit(X, y)
LR.score(X, y) | code |
2025278/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Housetrain = pd.read_csv('../input/train.csv')
Housetrain.head() | code |
34129676/cell_21 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary() | code |
34129676/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
print('Training size: {}'.format(train_df.shape))
print('Validation size: {}'.format(valid_df.shape)) | code |
34129676/cell_34 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
for i in range(0, 15):
for x, y in sample_generator:
image = x[0]
break
plt.tight_layout()
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr])
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))
ax1.plot(hist.history['loss'], color='b', label='Training loss')
ax1.plot(hist.history['val_loss'], color='r', label='Validation loss')
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1, 0.1))
ax2.plot(hist.history['accuracy'], color='b', label='Training accuracy')
ax2.plot(hist.history['val_accuracy'], color='r', label='Validation accuracy')
ax2.set_xticks(np.arange(1, EPOCHS, 1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/test/'
data_dir_test_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_test_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df_test = pd.DataFrame(lists, columns=['image'])
df_test['label'] = np.where(df_test['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
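# class_mode=None yields image batches without labels; shuffle=False keeps predictions aligned with the file order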
test_generator = test_datagen.flow_from_directory(directory=data_dir, target_size=(img_width, img_height), color_mode='rgb', batch_size=batch_size, class_mode=None, shuffle=False)
test_size = df_test.shape[0] | code |
34129676/cell_6 | [
"text_html_output_1.png"
] | import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
df['label'].value_counts().plot.bar() | code |
34129676/cell_39 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
for i in range(0, 15):
for x, y in sample_generator:
image = x[0]
break
plt.tight_layout()
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr])
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))
ax1.plot(hist.history['loss'], color='b', label='Training loss')
ax1.plot(hist.history['val_loss'], color='r', label='Validation loss')
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1, 0.1))
ax2.plot(hist.history['accuracy'], color='b', label='Training accuracy')
ax2.plot(hist.history['val_accuracy'], color='r', label='Validation accuracy')
ax2.set_xticks(np.arange(1, EPOCHS, 1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/test/'
data_dir_test_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_test_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df_test = pd.DataFrame(lists, columns=['image'])
df_test['label'] = np.where(df_test['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(directory=data_dir, target_size=(img_width, img_height), color_mode='rgb', batch_size=batch_size, class_mode=None, shuffle=False)
test_size = df_test.shape[0]
sample_test = df_test.head(18)
sample_test.head()
plt.figure(figsize=(10,10))
for index,row in sample_test.iterrows():
image = row['image']
pred = row['label']
img = load_img(data_dir + image)
plt.subplot(6,3,index+1)
plt.imshow(img)
plt.xlabel(pred)
plt.tight_layout()
plt.show()
print(df_test.count())
print(df_test['match'].value_counts())
df_test['match'].value_counts().plot.bar() | code |
34129676/cell_11 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
valid_df['label'].value_counts().plot.bar() | code |
34129676/cell_7 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
plt.subplot(1, 2, 1)
plt.title(sample.iloc[0]['label'])
plt.imshow(image)
image = load_img(data_dir + sample.iloc[1]['image'])
plt.subplot(1, 2, 2)
plt.title(sample.iloc[1]['label'])
plt.imshow(image)
plt.show() | code |
34129676/cell_18 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
plt.figure(figsize=(12, 12))
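# preview 15 random augmentations of the same sampled training image, one per subplot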
for i in range(0, 15):
plt.subplot(5, 3, i + 1)
for x, y in sample_generator:
image = x[0]
plt.imshow(image)
break
plt.tight_layout()
plt.show() | code |
34129676/cell_15 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb') | code |
34129676/cell_38 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
for i in range(0, 15):
for x, y in sample_generator:
image = x[0]
break
plt.tight_layout()
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr])
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))
ax1.plot(hist.history['loss'], color='b', label='Training loss')
ax1.plot(hist.history['val_loss'], color='r', label='Validation loss')
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1, 0.1))
ax2.plot(hist.history['accuracy'], color='b', label='Training accuracy')
ax2.plot(hist.history['val_accuracy'], color='r', label='Validation accuracy')
ax2.set_xticks(np.arange(1, EPOCHS, 1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/test/'
data_dir_test_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_test_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df_test = pd.DataFrame(lists, columns=['image'])
df_test['label'] = np.where(df_test['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(directory=data_dir, target_size=(img_width, img_height), color_mode='rgb', batch_size=batch_size, class_mode=None, shuffle=False)
test_size = df_test.shape[0]
sample_test = df_test.head(18)
sample_test.head()
plt.figure(figsize=(10, 10))
for index, row in sample_test.iterrows():
image = row['image']
pred = row['label']
img = load_img(data_dir + image)
plt.subplot(6, 3, index + 1)
plt.imshow(img)
plt.xlabel(pred)
plt.tight_layout()
plt.show() | code |
34129676/cell_3 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from collections import Counter
from keras import backend as K
from keras import optimizers
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os | code |
34129676/cell_17 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb') | code |
34129676/cell_31 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
for i in range(0, 15):
for x, y in sample_generator:
image = x[0]
break
plt.tight_layout()
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr])
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10))
ax1.plot(hist.history['loss'], color='b', label='Training loss')
ax1.plot(hist.history['val_loss'], color='r', label='Validation loss')
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1, 0.1))
ax2.plot(hist.history['accuracy'], color='b', label='Training accuracy')
ax2.plot(hist.history['val_accuracy'], color='r', label='Validation accuracy')
ax2.set_xticks(np.arange(1, EPOCHS, 1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show() | code |
34129676/cell_14 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb') | code |
34129676/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
train_df['label'].value_counts().plot.bar() | code |
34129676/cell_27 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
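# Baseline CNN: one conv/pool block, flattened into a 128-unit dense layer and a softmax output.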
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
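# Callbacks: stop early when val_loss stalls, checkpoint the best weights, and reduce the learning rate on plateau.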
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr]) | code |
34129676/cell_37 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/train/'
data_dir_train_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_train_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/train/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df = pd.DataFrame(lists, columns=['image'])
df['label'] = np.where(df['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
sample = df.sample(2)
image = load_img(data_dir + sample.iloc[0]['image'])
image = load_img(data_dir + sample.iloc[1]['image'])
train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)
img_width, img_height = (204, 204)
batch_size = 64
num_classes = 2
input_shape = (img_width, img_height, 3)
EPOCHS = 10
train_datagen = ImageDataGenerator(rotation_range=45, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1, rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_dataframe(dataframe=valid_df, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
sample = train_df.sample(1, random_state=42)
sample_generator = train_datagen.flow_from_dataframe(dataframe=sample, directory=data_dir, x_col='image', y_col='label', target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical', color_mode='rgb')
for i in range(0, 15):
for x, y in sample_generator:
image = x[0]
break
plt.tight_layout()
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, mode='min', verbose=1)
checkpointer = ModelCheckpoint(filepath='/kaggle/working/models/model.{epoch:02d}-{val_loss:.6f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10, min_lr=0, verbose=1)
hist = model.fit_generator(train_generator, steps_per_epoch=np.ceil(len(train_df) / batch_size), epochs=EPOCHS, validation_data=validation_generator, validation_steps=np.ceil(len(valid_df) / batch_size), workers=8, max_queue_size=15, callbacks=[earlystopper, checkpointer, reduce_lr])
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))
ax1.plot(hist.history['loss'], color='b', label='Training loss')
ax1.plot(hist.history['val_loss'], color='r', label='Validation loss')
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1, 0.1))
ax2.plot(hist.history['accuracy'], color='b', label='Training accuracy')
ax2.plot(hist.history['val_accuracy'], color='r', label='Validation accuracy')
ax2.set_xticks(np.arange(1, EPOCHS, 1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
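# Build the held-out test set from the LEGO and Unknown test folders.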
data_dir = '/kaggle/input/lego-vs-unknown-cropped/VIA_dataset_cropped/test/'
data_dir_test_lego = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/LEGO'
test_lego = ['LEGO/' + f for f in os.listdir(data_dir_test_lego)]
data_dir_test_Unknown = '/kaggle/input//lego-vs-unknown-cropped/VIA_dataset_cropped/test/Unknown'
test_Unknown = ['Unknown/' + f for f in os.listdir(data_dir_test_Unknown)]
lists = test_lego + test_Unknown
df_test = pd.DataFrame(lists, columns=['image'])
df_test['label'] = np.where(df_test['image'].str.contains('LEGO'), 'LEGO', 'Unknown')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(directory=data_dir, target_size=(img_width, img_height), color_mode='rgb', batch_size=batch_size, class_mode=None, shuffle=False)
test_size = df_test.shape[0]
df_test | code |
129013037/cell_42 | [
"image_output_1.png"
] | from mlxtend.plotting import plot_decision_regions
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
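# Remove Protein_(g) outliers outside 1.5 * IQR.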
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
scores.append(score)
plt.figure(figsize=(12, 3))
sns.lineplot(x=list(map(str, range(1, 11))), y=scores, marker='o', markersize=10) | code |
129013037/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train, palette='viridis')
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette='rocket_r') | code |
129013037/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum() | code |
129013037/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from sklearn.inspection import DecisionBoundaryDisplay
disp = DecisionBoundaryDisplay.from_estimator(clf, X_test, response_method='predict', alpha=0.7)
disp.ax_.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor='yellow') | code |
129013037/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data['Protein_(g)']) | code |
129013037/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
y | code |
129013037/cell_30 | [
"image_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test) | code |
129013037/cell_44 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
scores.append(score)
clf = KNeighborsClassifier(metric='manhattan')
clf.fit(X_train, y_train)
clf.score(X_test, y_test) | code |
129013037/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data['Protein_(g)']) | code |
129013037/cell_40 | [
"text_plain_output_1.png"
] | from mlxtend.plotting import plot_decision_regions
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2) | code |
129013037/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test) | code |
129013037/cell_48 | [
"image_output_1.png"
] | from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
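# Grid-search the neighbour count and distance metric (l1/l2) for the kNN classifier.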
params = {'n_neighbors': range(1, 30), 'metric': ['l1', 'l2']}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test) | code |
129013037/cell_41 | [
"image_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(k, score)
scores.append(score) | code |
129013037/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.describe() | code |
129013037/cell_52 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
params = {'n_neighbors': range(1, 30), 'metric': ['l1', 'l2']}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test)
best_clf.best_params_
y_best_clf = best_clf.predict(X_test)
print(classification_report(y_test, y_best_clf)) | code |
129013037/cell_1 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train | code |
129013037/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
train.info() | code |
129013037/cell_49 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
params = {'n_neighbors': range(1, 30), 'metric': ['l1', 'l2']}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test)
best_clf.best_params_ | code |
129013037/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y) | code |
129013037/cell_51 | [
"image_output_1.png"
] | from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
scores.append(score)
clf = KNeighborsClassifier(metric='manhattan')
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
y_clf = clf.predict(X_test)
print(classification_report(y_test, y_clf)) | code |
129013037/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from mlxtend.plotting import plot_decision_regions
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2) | code |
129013037/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.info() | code |
129013037/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
y | code |
129013037/cell_38 | [
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train)
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette='tab10') | code |
129013037/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum() | code |
129013037/cell_31 | [
"image_output_1.png"
] | from mlxtend.plotting import plot_decision_regions
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2);
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2) | code |
129013037/cell_22 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test) | code |
129013037/cell_10 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
train['Ash_(g)'] = train['Ash_(g)'].fillna(train['Ash_(g)'].median())
train['Water_(g)'] = train['Water_(g)'].fillna(train['Water_(g)'].median())
train['Fiber_TD_(g)'] = train['Fiber_TD_(g)'].fillna(train['Fiber_TD_(g)'].median())
train['Sugar_Tot_(g)'] = train['Sugar_Tot_(g)'].fillna(train['Sugar_Tot_(g)'].median())
train['Calcium_(mg)'] = train['Calcium_(mg)'].fillna(train['Calcium_(mg)'].median())
train['Iron_(mg)'] = train['Iron_(mg)'].fillna(train['Iron_(mg)'].median())
train['Magnesium_(mg)'] = train['Magnesium_(mg)'].fillna(train['Magnesium_(mg)'].median())
train['Phosphorus_(mg)'] = train['Phosphorus_(mg)'].fillna(train['Phosphorus_(mg)'].median())
train['Potassium_(mg)'] = train['Potassium_(mg)'].fillna(train['Potassium_(mg)'].median())
train['Sodium_(mg)'] = train['Sodium_(mg)'].fillna(train['Sodium_(mg)'].median())
train['Zinc_(mg)'] = train['Zinc_(mg)'].fillna(train['Zinc_(mg)'].median())
train['Copper_mg)'] = train['Copper_mg)'].fillna(train['Copper_mg)'].median())
train['Manganese_(mg)'] = train['Manganese_(mg)'].fillna(train['Manganese_(mg)'].median())
train['Selenium_(µg)'] = train['Selenium_(µg)'].fillna(train['Selenium_(µg)'].median())
train['Vit_C_(mg)'] = train['Vit_C_(mg)'].fillna(train['Vit_C_(mg)'].median())
train['Thiamin_(mg)'] = train['Thiamin_(mg)'].fillna(train['Thiamin_(mg)'].median())
train['Riboflavin_(mg)'] = train['Riboflavin_(mg)'].fillna(train['Riboflavin_(mg)'].median())
train['Niacin_(mg)'] = train['Niacin_(mg)'].fillna(train['Niacin_(mg)'].median())
train['Panto_Acid_mg)'] = train['Panto_Acid_mg)'].fillna(train['Panto_Acid_mg)'].median())
train['Vit_B6_(mg)'] = train['Vit_B6_(mg)'].fillna(train['Vit_B6_(mg)'].median())
train['Folate_Tot_(µg)'] = train['Folate_Tot_(µg)'].fillna(train['Folate_Tot_(µg)'].median())
train['Folic_Acid_(µg)'] = train['Folic_Acid_(µg)'].fillna(train['Folic_Acid_(µg)'].median())
train['Food_Folate_(µg)'] = train['Food_Folate_(µg)'].fillna(train['Food_Folate_(µg)'].median())
train['Folate_DFE_(µg)'] = train['Folate_DFE_(µg)'].fillna(train['Folate_DFE_(µg)'].median())
train['Choline_Tot_ (mg)'] = train['Choline_Tot_ (mg)'].fillna(train['Choline_Tot_ (mg)'].median())
train['Vit_B12_(µg)'] = train['Vit_B12_(µg)'].fillna(train['Vit_B12_(µg)'].median())
train['Vit_A_IU'] = train['Vit_A_IU'].fillna(train['Vit_A_IU'].median())
train['Vit_A_RAE'] = train['Vit_A_RAE'].fillna(train['Vit_A_RAE'].median())
train['Retinol_(µg)'] = train['Retinol_(µg)'].fillna(train['Retinol_(µg)'].median())
train['Alpha_Carot_(µg)'] = train['Alpha_Carot_(µg)'].fillna(train['Alpha_Carot_(µg)'].median())
train['Beta_Carot_(µg)'] = train['Beta_Carot_(µg)'].fillna(train['Beta_Carot_(µg)'].median())
train['Beta_Crypt_(µg)'] = train['Beta_Crypt_(µg)'].fillna(train['Beta_Crypt_(µg)'].median())
train['Lycopene_(µg)'] = train['Lycopene_(µg)'].fillna(train['Lycopene_(µg)'].median())
train['Lut+Zea_ (µg)'] = train['Lut+Zea_ (µg)'].fillna(train['Lut+Zea_ (µg)'].median())
train['Vit_E_(mg)'] = train['Vit_E_(mg)'].fillna(train['Vit_E_(mg)'].median())
train['Vit_D_µg'] = train['Vit_D_µg'].fillna(train['Vit_D_µg'].median())
train['Vit_D_IU'] = train['Vit_D_IU'].fillna(train['Vit_D_IU'].median())
train['Vit_K_(µg)'] = train['Vit_K_(µg)'].fillna(train['Vit_K_(µg)'].median())
train['FA_Sat_(g)'] = train['FA_Sat_(g)'].fillna(train['FA_Sat_(g)'].median())
train['FA_Mono_(g)'] = train['FA_Mono_(g)'].fillna(train['FA_Mono_(g)'].median())
train['FA_Poly_(g)'] = train['FA_Poly_(g)'].fillna(train['FA_Poly_(g)'].median())
train['Cholestrl_(mg)'] = train['Cholestrl_(mg)'].fillna(train['Cholestrl_(mg)'].median())
train['GmWt_1'] = train['GmWt_1'].fillna(train['GmWt_1'].median())
train['GmWt_2'] = train['GmWt_2'].fillna(train['GmWt_2'].median())
train['Refuse_Pct'] = train['Refuse_Pct'].fillna(train['Refuse_Pct'].median()) | code |
129013037/cell_36 | [
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel('/kaggle/input/products/ABBREV_with_CLASS.xlsx')
train
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
Q1 = train['Protein_(g)'].quantile(0.25)
Q3 = train['Protein_(g)'].quantile(0.75)
IQR = Q3 - Q1
train = train[(train['Protein_(g)'] >= Q1 - 1.5 * IQR) & (train['Protein_(g)'] <= Q3 + 1.5 * IQR)]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train['Shrt_Desc'] = labelencoder_Shrt_Desc.fit_transform(train['Shrt_Desc'])
labelencoder_GmWt_Desc1 = LabelEncoder()
train['GmWt_Desc1'] = labelencoder_GmWt_Desc1.fit_transform(train['GmWt_Desc1'])
labelencoder_GmWt_Desc2 = LabelEncoder()
train['GmWt_Desc2'] = labelencoder_GmWt_Desc2.fit_transform(train['GmWt_Desc2'])
train.isnull().sum()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
centers = [[1, 1], [-1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop('CLASS', axis=1)
y = train['CLASS']
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y) | code |
129024934/cell_21 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df | code |
129024934/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser1 | code |
129024934/cell_25 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A'] | code |
129024934/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data) | code |
129024934/cell_57 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1)
df3.dropna()
df3.dropna(thresh=2)
df3 | code |
129024934/cell_56 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1)
df3.dropna()
df3.dropna(thresh=2) | code |
129024934/cell_30 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
booldf = df > 0
booldf | code |
129024934/cell_33 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df[df['W'] > 0]['X'] | code |
129024934/cell_44 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1'] | code |
129024934/cell_55 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1)
df3.dropna() | code |
129024934/cell_6 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d) | code |
129024934/cell_39 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df.set_index('States')
df | code |
129024934/cell_26 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1] | code |
129024934/cell_48 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2]
dfnew.loc['G1'].loc[2]['B']
dfnew.xs('G1') | code |
129024934/cell_41 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index | code |
129024934/cell_54 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1) | code |
129024934/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
ser2 | code |
129024934/cell_19 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df[['W', 'Z']] | code |
129024934/cell_50 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2]
dfnew.loc['G1'].loc[2]['B']
dfnew.xs('G1')
dfnew.index.names = ['Groups', 'Num']
dfnew | code |
129024934/cell_7 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
d | code |
129024934/cell_45 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2] | code |
129024934/cell_18 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
print(type(df['W']))
print(type(df)) | code |
129024934/cell_32 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df['W'] > 0 | code |
129024934/cell_51 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2]
dfnew.loc['G1'].loc[2]['B']
dfnew.xs('G1')
dfnew.index.names = ['Groups', 'Num']
dfnew.xs(1, level='Num') | code |
129024934/cell_59 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1)
df3.dropna()
df3.dropna(thresh=2)
df3.fillna(value='X')
df3['A'].fillna(value=df3['A'].mean()) | code |
129024934/cell_58 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
df3.dropna(axis=1)
df3.dropna()
df3.dropna(thresh=2)
df3.fillna(value='X') | code |
129024934/cell_28 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']] | code |
129024934/cell_16 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df | code |
129024934/cell_38 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df.set_index('States') | code |
129024934/cell_47 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2]
dfnew.loc['G1'].loc[2]['B']
dfnew | code |
129024934/cell_17 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['W'] | code |
129024934/cell_35 | [
"text_html_output_1.png"
] | states = 'CA NY WY OR'.split()
states | code |
129024934/cell_43 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew | code |
129024934/cell_31 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['new'] = df['W'] + df['Y']  # assumed setup from an earlier cell: create 'new' so the drop below succeeds
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df['W'] | code |
129024934/cell_46 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
dfnew.loc['G1']
dfnew.loc['G1'].loc[2]
dfnew.loc['G1'].loc[2]['B'] | code |