path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
2025290/cell_23 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
lircross = cross_val_score(LiR, X, y, cv=10)
print('Accuracy: %0.2f(+/- %0.2f)' % (lircross.mean(), lircross.std() * 2)) | code |
2025290/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number]) | code |
2025290/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestnum.isnull().sum()
housetestnum['LotFrontage'].fillna(housetestnum['LotFrontage'].mean(), inplace=True)
housetestnum['MasVnrArea'].fillna(housetestnum['MasVnrArea'].mean(), inplace=True)
housetestnum['BsmtFinSF1'].fillna(housetestnum['BsmtFinSF1'].mean(), inplace=True)
housetestnum['BsmtFinSF2'].fillna(housetestnum['BsmtFinSF2'].mean(), inplace=True)
housetestnum['BsmtUnfSF'].fillna(housetestnum['BsmtUnfSF'].mean(), inplace=True)
housetestnum['TotalBsmtSF'].fillna(housetestnum['TotalBsmtSF'].mean(), inplace=True)
housetestnum['BsmtFullBath'].fillna(housetestnum['BsmtFullBath'].mean(), inplace=True)
housetestnum['BsmtHalfBath'].fillna(housetestnum['BsmtHalfBath'].mean(), inplace=True)
housetestnum['GarageCars'].fillna(housetestnum['GarageCars'].mean(), inplace=True)
housetestnum['GarageArea'].fillna(housetestnum['GarageArea'].mean(), inplace=True)
housetestnum['GarageYrBlt'].fillna(housetestnum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2025290/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfnum['LotFrontage'].fillna(housedfnum['LotFrontage'].mean(), inplace=True)
housedfnum['MasVnrArea'].fillna(housedfnum['MasVnrArea'].mean(), inplace=True)
housedfnum['GarageYrBlt'].fillna(housedfnum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2025290/cell_40 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestcat.isnull().sum()
housetestcat1 = housetestcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housetestcat1.isnull().sum() | code |
2025290/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum() | code |
2025290/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestcat.isnull().sum()
housetestcat1 = housetestcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1) | code |
2025290/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice | code |
2025290/cell_41 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestcat.isnull().sum()
housetestcat1 = housetestcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housetestcat1.isnull().sum()
housetestcat1['MasVnrType'].fillna(housetestcat1['MasVnrType'].value_counts().idxmax(), inplace=True)
housetestcat1['BsmtQual'].fillna(housetestcat1['BsmtQual'].value_counts().idxmax(), inplace=True)
housetestcat1['BsmtCond'].fillna(housetestcat1['BsmtCond'].value_counts().idxmax(), inplace=True)
housetestcat1['BsmtExposure'].fillna(housetestcat1['BsmtExposure'].value_counts().idxmax(), inplace=True)
housetestcat1['BsmtFinType1'].fillna(housetestcat1['BsmtFinType1'].value_counts().idxmax(), inplace=True)
housetestcat1['BsmtFinType2'].fillna(housetestcat1['BsmtFinType2'].value_counts().idxmax(), inplace=True)
housetestcat1['GarageType'].fillna(housetestcat1['GarageType'].value_counts().idxmax(), inplace=True)
housetestcat1['GarageFinish'].fillna(housetestcat1['GarageFinish'].value_counts().idxmax(), inplace=True)
housetestcat1['GarageQual'].fillna(housetestcat1['GarageQual'].value_counts().idxmax(), inplace=True)
housetestcat1['GarageCond'].fillna(housetestcat1['GarageCond'].value_counts().idxmax(), inplace=True) | code |
2025290/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv') | code |
2025290/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat1['MasVnrType'].fillna(housedfcat1['MasVnrType'].value_counts().idxmax(), inplace=True)
housedfcat1['BsmtQual'].fillna(housedfcat1['BsmtQual'].value_counts().idxmax(), inplace=True)
housedfcat1['BsmtCond'].fillna(housedfcat1['BsmtCond'].value_counts().idxmax(), inplace=True)
housedfcat1['BsmtExposure'].fillna(housedfcat1['BsmtExposure'].value_counts().idxmax(), inplace=True)
housedfcat1['BsmtFinType1'].fillna(housedfcat1['BsmtFinType1'].value_counts().idxmax(), inplace=True)
housedfcat1['BsmtFinType2'].fillna(housedfcat1['BsmtFinType2'].value_counts().idxmax(), inplace=True)
housedfcat1['Electrical'].fillna(housedfcat1['Electrical'].value_counts().idxmax(), inplace=True)
housedfcat1['GarageType'].fillna(housedfcat1['GarageType'].value_counts().idxmax(), inplace=True)
housedfcat1['GarageFinish'].fillna(housedfcat1['GarageFinish'].value_counts().idxmax(), inplace=True)
housedfcat1['GarageQual'].fillna(housedfcat1['GarageQual'].value_counts().idxmax(), inplace=True)
housedfcat1['GarageCond'].fillna(housedfcat1['GarageCond'].value_counts().idxmax(), inplace=True) | code |
2025290/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y) | code |
2025290/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y) | code |
2025290/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestnum.isnull().sum() | code |
2025290/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv') | code |
2025290/cell_38 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestnum.isnull().sum()
housetestnum.isnull().sum()
le = LabelEncoder()
housetestnum['MSSubClass'] = le.fit_transform(housetestnum['MSSubClass'].values)
housetestnum['OverallQual'] = le.fit_transform(housetestnum['OverallQual'].values)
housetestnum['OverallCond'] = le.fit_transform(housetestnum['OverallCond'].values)
housetestnum['YearBuilt'] = le.fit_transform(housetestnum['YearBuilt'].values)
housetestnum['YearRemodAdd'] = le.fit_transform(housetestnum['YearRemodAdd'].values)
housetestnum['YrSold'] = le.fit_transform(housetestnum['YrSold'].values)
housetestnum['GarageYrBlt'] = le.fit_transform(housetestnum['GarageYrBlt'].values) | code |
2025290/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum() | code |
2025290/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1) | code |
2025290/cell_35 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestcat.isnull().sum() | code |
2025290/cell_43 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestnum.isnull().sum()
housetestnum.isnull().sum()
housetestcat.isnull().sum()
le = LabelEncoder()
housetestnum['MSSubClass'] = le.fit_transform(housetestnum['MSSubClass'].values)
housetestnum['OverallQual'] = le.fit_transform(housetestnum['OverallQual'].values)
housetestnum['OverallCond'] = le.fit_transform(housetestnum['OverallCond'].values)
housetestnum['YearBuilt'] = le.fit_transform(housetestnum['YearBuilt'].values)
housetestnum['YearRemodAdd'] = le.fit_transform(housetestnum['YearRemodAdd'].values)
housetestnum['YrSold'] = le.fit_transform(housetestnum['YrSold'].values)
housetestnum['GarageYrBlt'] = le.fit_transform(housetestnum['GarageYrBlt'].values)
housetestcat1 = housetestcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housetestcat1.isnull().sum()
housetestcat1['MSZoning'] = le.fit_transform(housetestcat1['MSZoning'].astype(str))
housetestcat1['Utilities'] = le.fit_transform(housetestcat1['Utilities'].astype(str)) | code |
2025290/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object]) | code |
2025290/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X) | code |
2025290/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape | code |
2025290/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
lircross = cross_val_score(LiR, X, y, cv=10)
lircross | code |
2025290/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1) | code |
2025290/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2)) | code |
2025290/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform) | code |
2025290/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object]) | code |
329572/cell_23 | [
"text_html_output_1.png"
] | c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 100)]
var_list, p_var_list = (get_vars(c_ids), get_vars(c_ids, percent=True)) | code |
329572/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
def demandVar(c_id, df, percent=False):
    """Get the week-over-week change in product demand for one client.

    Parameters
    ----------
    c_id :
        Client identifier, compared against ``df.Cliente_ID``.
    df : pandas.DataFrame
        Training data with ``Semana`` (week), ``Cliente_ID``,
        ``Producto_ID`` and ``Demanda_uni_equil`` (demand) columns.
    percent : bool, default False
        If True, report the relative change ``(b - a) / (b + a)``
        instead of the raw difference ``b - a``.

    Returns
    -------
    pandas.DataFrame
        Indexed by product id, one ``week_{w-1}-{w}`` column per
        consecutive pair of weeks, NaN where the product was not ordered
        in both weeks.
        NOTE(review): if every loop iteration succeeds, control falls off
        the end of the loop and the function returns None; callers appear
        to rely on the bare ``except`` firing before week 9 to return the
        partial frame — confirm this is intended.
    """
    # Weeks 4..9: compare each week against the preceding one.
    for week in range(4, 10):
        try:
            # Per-product demand for this client in the earlier week ...
            vals_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Demanda_uni_equil.values
            prod_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Producto_ID.values
            dict_a = {p: v for p, v in zip(prod_a, vals_a)}
            # ... and in the later week.
            vals_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Demanda_uni_equil.values
            prod_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Producto_ID.values
            dict_b = {p: v for p, v in zip(prod_b, vals_b)}
            dict_merge = {}
            # Union of the products seen in either of the two weeks.
            for key in np.unique(np.concatenate((prod_a, prod_b))):
                try:
                    if percent:
                        try:
                            # Bare except: a missing product (KeyError) or a
                            # failed division is recorded as 0.0 in percent mode.
                            dict_merge[key] = (dict_b[key].astype(int) - dict_a[key].astype(int)) / (dict_b[key].astype(int) + dict_a[key].astype(int))
                        except:
                            dict_merge[key] = 0.0
                    else:
                        dict_merge[key] = dict_b[key].astype(int) - dict_a[key].astype(int)
                except:
                    # Product missing from one of the two weeks -> NaN.
                    dict_merge[key] = np.nan
            if week == 4:
                # First week pair seeds the result frame.
                df_return = pd.DataFrame({'week_3-4': list(dict_merge.values())}, index=list(dict_merge.keys()))
            else:
                # Later pairs are outer-joined on product id so products
                # absent from some pairs keep NaN cells.
                df_new = pd.DataFrame({'week_' + str(week - 1) + '-' + str(week): list(dict_merge.values())}, index=list(dict_merge.keys()))
                df_return = pd.merge(df_return, df_new, how='outer', left_index=True, right_index=True)
        except:
            # HACK: bare except doubles as the loop exit — return whatever
            # has been accumulated so far (raises NameError if week 4
            # itself failed, since df_return is then unbound).
            return df_return
c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 5)]
c_ids
var = demandVar(c_id=c_ids[0], df=df)
var
var = demandVar(c_id=c_ids[1], df=df, percent=True)
var
def get_vars(c_ids, percent=False):
    """Collect per-client week-to-week demand variations.

    For every client id in *c_ids*, gather all non-NaN entries of the
    frame produced by ``demandVar`` (module-level ``df`` is used as the
    data source) into one flat list per client, returned in the same
    order as *c_ids*.
    """
    collected = []
    for client in c_ids:
        changes = demandVar(client, df, percent)
        flat = []
        for column in changes.columns:
            flat.extend(changes[column].dropna())
        collected.append(flat)
    return collected
var_list = get_vars(c_ids)
colors = ['blue', 'red', 'green', 'turquoise', 'brown']
fig, ax = plt.subplots(1, 2)
plt.suptitle('Change in demand on individual poducts for 5 clients')
for i in range(5):
ax[0].hist(var_list[i], color=colors[i], normed=True, alpha=0.5, bins=20)
ax[0].set_ylim(0, 0.2)
ax[0].set_xlabel('Change in demand')
ax[0].set_ylabel('Normed frequency')
for i in range(5):
ax[1].hist(var_list[i], color=colors[i], normed=True, alpha=0.5, bins=20)
ax[1].set_ylim(0, 1) | code |
329572/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
df.Demanda_uni_equil.hist(bins=100, log=True)
plt.xlabel('Demand per week')
plt.ylabel('Number of clients') | code |
329572/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
print('plotting {0:.2f} % of data'.format(100 * (demand_sorted < 30).sum() / len(demand_sorted)))
demand_sorted[demand_sorted < 30].hist(bins=30)
plt.xlabel('Demand per week')
plt.ylabel('Number of clients') | code |
329572/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
def demandVar(c_id, df, percent=False):
    """Get the week-over-week change in product demand for one client.

    Parameters
    ----------
    c_id :
        Client identifier, compared against ``df.Cliente_ID``.
    df : pandas.DataFrame
        Training data with ``Semana`` (week), ``Cliente_ID``,
        ``Producto_ID`` and ``Demanda_uni_equil`` (demand) columns.
    percent : bool, default False
        If True, report the relative change ``(b - a) / (b + a)``
        instead of the raw difference ``b - a``.

    Returns
    -------
    pandas.DataFrame
        Indexed by product id, one ``week_{w-1}-{w}`` column per
        consecutive pair of weeks, NaN where the product was not ordered
        in both weeks.
        NOTE(review): if every loop iteration succeeds, control falls off
        the end of the loop and the function returns None; callers appear
        to rely on the bare ``except`` firing before week 9 to return the
        partial frame — confirm this is intended.
    """
    # Weeks 4..9: compare each week against the preceding one.
    for week in range(4, 10):
        try:
            # Per-product demand for this client in the earlier week ...
            vals_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Demanda_uni_equil.values
            prod_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Producto_ID.values
            dict_a = {p: v for p, v in zip(prod_a, vals_a)}
            # ... and in the later week.
            vals_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Demanda_uni_equil.values
            prod_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Producto_ID.values
            dict_b = {p: v for p, v in zip(prod_b, vals_b)}
            dict_merge = {}
            # Union of the products seen in either of the two weeks.
            for key in np.unique(np.concatenate((prod_a, prod_b))):
                try:
                    if percent:
                        try:
                            # Bare except: a missing product (KeyError) or a
                            # failed division is recorded as 0.0 in percent mode.
                            dict_merge[key] = (dict_b[key].astype(int) - dict_a[key].astype(int)) / (dict_b[key].astype(int) + dict_a[key].astype(int))
                        except:
                            dict_merge[key] = 0.0
                    else:
                        dict_merge[key] = dict_b[key].astype(int) - dict_a[key].astype(int)
                except:
                    # Product missing from one of the two weeks -> NaN.
                    dict_merge[key] = np.nan
            if week == 4:
                # First week pair seeds the result frame.
                df_return = pd.DataFrame({'week_3-4': list(dict_merge.values())}, index=list(dict_merge.keys()))
            else:
                # Later pairs are outer-joined on product id so products
                # absent from some pairs keep NaN cells.
                df_new = pd.DataFrame({'week_' + str(week - 1) + '-' + str(week): list(dict_merge.values())}, index=list(dict_merge.keys()))
                df_return = pd.merge(df_return, df_new, how='outer', left_index=True, right_index=True)
        except:
            # HACK: bare except doubles as the loop exit — return whatever
            # has been accumulated so far (raises NameError if week 4
            # itself failed, since df_return is then unbound).
            return df_return
c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 5)]
c_ids
var = demandVar(c_id=c_ids[0], df=df)
var
var = demandVar(c_id=c_ids[1], df=df, percent=True)
var | code |
329572/cell_14 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
def demandVar(c_id, df, percent=False):
    """Get the week-over-week change in product demand for one client.

    Parameters
    ----------
    c_id :
        Client identifier, compared against ``df.Cliente_ID``.
    df : pandas.DataFrame
        Training data with ``Semana`` (week), ``Cliente_ID``,
        ``Producto_ID`` and ``Demanda_uni_equil`` (demand) columns.
    percent : bool, default False
        If True, report the relative change ``(b - a) / (b + a)``
        instead of the raw difference ``b - a``.

    Returns
    -------
    pandas.DataFrame
        Indexed by product id, one ``week_{w-1}-{w}`` column per
        consecutive pair of weeks, NaN where the product was not ordered
        in both weeks.
        NOTE(review): if every loop iteration succeeds, control falls off
        the end of the loop and the function returns None; callers appear
        to rely on the bare ``except`` firing before week 9 to return the
        partial frame — confirm this is intended.
    """
    # Weeks 4..9: compare each week against the preceding one.
    for week in range(4, 10):
        try:
            # Per-product demand for this client in the earlier week ...
            vals_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Demanda_uni_equil.values
            prod_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Producto_ID.values
            dict_a = {p: v for p, v in zip(prod_a, vals_a)}
            # ... and in the later week.
            vals_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Demanda_uni_equil.values
            prod_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Producto_ID.values
            dict_b = {p: v for p, v in zip(prod_b, vals_b)}
            dict_merge = {}
            # Union of the products seen in either of the two weeks.
            for key in np.unique(np.concatenate((prod_a, prod_b))):
                try:
                    if percent:
                        try:
                            # Bare except: a missing product (KeyError) or a
                            # failed division is recorded as 0.0 in percent mode.
                            dict_merge[key] = (dict_b[key].astype(int) - dict_a[key].astype(int)) / (dict_b[key].astype(int) + dict_a[key].astype(int))
                        except:
                            dict_merge[key] = 0.0
                    else:
                        dict_merge[key] = dict_b[key].astype(int) - dict_a[key].astype(int)
                except:
                    # Product missing from one of the two weeks -> NaN.
                    dict_merge[key] = np.nan
            if week == 4:
                # First week pair seeds the result frame.
                df_return = pd.DataFrame({'week_3-4': list(dict_merge.values())}, index=list(dict_merge.keys()))
            else:
                # Later pairs are outer-joined on product id so products
                # absent from some pairs keep NaN cells.
                df_new = pd.DataFrame({'week_' + str(week - 1) + '-' + str(week): list(dict_merge.values())}, index=list(dict_merge.keys()))
                df_return = pd.merge(df_return, df_new, how='outer', left_index=True, right_index=True)
        except:
            # HACK: bare except doubles as the loop exit — return whatever
            # has been accumulated so far (raises NameError if week 4
            # itself failed, since df_return is then unbound).
            return df_return
c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 5)]
c_ids
var = demandVar(c_id=c_ids[0], df=df)
var | code |
329572/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
def demandVar(c_id, df, percent=False):
    """Get the week-over-week change in product demand for one client.

    Parameters
    ----------
    c_id :
        Client identifier, compared against ``df.Cliente_ID``.
    df : pandas.DataFrame
        Training data with ``Semana`` (week), ``Cliente_ID``,
        ``Producto_ID`` and ``Demanda_uni_equil`` (demand) columns.
    percent : bool, default False
        If True, report the relative change ``(b - a) / (b + a)``
        instead of the raw difference ``b - a``.

    Returns
    -------
    pandas.DataFrame
        Indexed by product id, one ``week_{w-1}-{w}`` column per
        consecutive pair of weeks, NaN where the product was not ordered
        in both weeks.
        NOTE(review): if every loop iteration succeeds, control falls off
        the end of the loop and the function returns None; callers appear
        to rely on the bare ``except`` firing before week 9 to return the
        partial frame — confirm this is intended.
    """
    # Weeks 4..9: compare each week against the preceding one.
    for week in range(4, 10):
        try:
            # Per-product demand for this client in the earlier week ...
            vals_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Demanda_uni_equil.values
            prod_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Producto_ID.values
            dict_a = {p: v for p, v in zip(prod_a, vals_a)}
            # ... and in the later week.
            vals_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Demanda_uni_equil.values
            prod_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Producto_ID.values
            dict_b = {p: v for p, v in zip(prod_b, vals_b)}
            dict_merge = {}
            # Union of the products seen in either of the two weeks.
            for key in np.unique(np.concatenate((prod_a, prod_b))):
                try:
                    if percent:
                        try:
                            # Bare except: a missing product (KeyError) or a
                            # failed division is recorded as 0.0 in percent mode.
                            dict_merge[key] = (dict_b[key].astype(int) - dict_a[key].astype(int)) / (dict_b[key].astype(int) + dict_a[key].astype(int))
                        except:
                            dict_merge[key] = 0.0
                    else:
                        dict_merge[key] = dict_b[key].astype(int) - dict_a[key].astype(int)
                except:
                    # Product missing from one of the two weeks -> NaN.
                    dict_merge[key] = np.nan
            if week == 4:
                # First week pair seeds the result frame.
                df_return = pd.DataFrame({'week_3-4': list(dict_merge.values())}, index=list(dict_merge.keys()))
            else:
                # Later pairs are outer-joined on product id so products
                # absent from some pairs keep NaN cells.
                df_new = pd.DataFrame({'week_' + str(week - 1) + '-' + str(week): list(dict_merge.values())}, index=list(dict_merge.keys()))
                df_return = pd.merge(df_return, df_new, how='outer', left_index=True, right_index=True)
        except:
            # HACK: bare except doubles as the loop exit — return whatever
            # has been accumulated so far (raises NameError if week 4
            # itself failed, since df_return is then unbound).
            return df_return
c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 5)]
c_ids
var = demandVar(c_id=c_ids[0], df=df)
var
var = demandVar(c_id=c_ids[1], df=df, percent=True)
var
def get_vars(c_ids, percent=False):
    """Collect per-client week-to-week demand variations.

    For every client id in *c_ids*, gather all non-NaN entries of the
    frame produced by ``demandVar`` (module-level ``df`` is used as the
    data source) into one flat list per client, returned in the same
    order as *c_ids*.
    """
    collected = []
    for client in c_ids:
        changes = demandVar(client, df, percent)
        flat = []
        for column in changes.columns:
            flat.extend(changes[column].dropna())
        collected.append(flat)
    return collected
var_list = get_vars(c_ids)
colors = ['blue', 'red', 'green', 'turquoise', 'brown']
fig, ax = plt.subplots(1,2)
plt.suptitle('Change in demand on individual poducts for 5 clients')
for i in range(5):
ax[0].hist(var_list[i], color=colors[i],
normed=True, alpha=0.5, bins=20)
ax[0].set_ylim(0,0.2)
ax[0].set_xlabel('Change in demand')
ax[0].set_ylabel('Normed frequency')
for i in range(5):
ax[1].hist(var_list[i], color=colors[i],
normed=True, alpha=0.5, bins=20)
ax[1].set_ylim(0,1)
var_list = get_vars(c_ids, percent=True)
colors = ['blue', 'red', 'green', 'turquoise', 'brown']
fig, ax = plt.subplots(1, 2)
plt.suptitle('Percent change in demand on individual poducts for 5 clients')
for i in range(5):
ax[0].hist(var_list[i], color=colors[i], normed=True, alpha=0.5, bins=20)
ax[0].set_ylim(0, 2)
ax[0].set_xlabel('Change in demand')
ax[0].set_ylabel('Normed frequency')
for i in range(5):
ax[1].hist(var_list[i], color=colors[i], normed=True, alpha=0.5, bins=20) | code |
329572/cell_12 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
demand_sorted = df.Demanda_uni_equil.sort_values(ascending=True)
def demandVar(c_id, df, percent=False):
    """Get the week-over-week change in product demand for one client.

    Parameters
    ----------
    c_id :
        Client identifier, compared against ``df.Cliente_ID``.
    df : pandas.DataFrame
        Training data with ``Semana`` (week), ``Cliente_ID``,
        ``Producto_ID`` and ``Demanda_uni_equil`` (demand) columns.
    percent : bool, default False
        If True, report the relative change ``(b - a) / (b + a)``
        instead of the raw difference ``b - a``.

    Returns
    -------
    pandas.DataFrame
        Indexed by product id, one ``week_{w-1}-{w}`` column per
        consecutive pair of weeks, NaN where the product was not ordered
        in both weeks.
        NOTE(review): if every loop iteration succeeds, control falls off
        the end of the loop and the function returns None; callers appear
        to rely on the bare ``except`` firing before week 9 to return the
        partial frame — confirm this is intended.
    """
    # Weeks 4..9: compare each week against the preceding one.
    for week in range(4, 10):
        try:
            # Per-product demand for this client in the earlier week ...
            vals_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Demanda_uni_equil.values
            prod_a = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week - 1)].Producto_ID.values
            dict_a = {p: v for p, v in zip(prod_a, vals_a)}
            # ... and in the later week.
            vals_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Demanda_uni_equil.values
            prod_b = df[(df.Cliente_ID.values == c_id) & (df.Semana.values == week)].Producto_ID.values
            dict_b = {p: v for p, v in zip(prod_b, vals_b)}
            dict_merge = {}
            # Union of the products seen in either of the two weeks.
            for key in np.unique(np.concatenate((prod_a, prod_b))):
                try:
                    if percent:
                        try:
                            # Bare except: a missing product (KeyError) or a
                            # failed division is recorded as 0.0 in percent mode.
                            dict_merge[key] = (dict_b[key].astype(int) - dict_a[key].astype(int)) / (dict_b[key].astype(int) + dict_a[key].astype(int))
                        except:
                            dict_merge[key] = 0.0
                    else:
                        dict_merge[key] = dict_b[key].astype(int) - dict_a[key].astype(int)
                except:
                    # Product missing from one of the two weeks -> NaN.
                    dict_merge[key] = np.nan
            if week == 4:
                # First week pair seeds the result frame.
                df_return = pd.DataFrame({'week_3-4': list(dict_merge.values())}, index=list(dict_merge.keys()))
            else:
                # Later pairs are outer-joined on product id so products
                # absent from some pairs keep NaN cells.
                df_new = pd.DataFrame({'week_' + str(week - 1) + '-' + str(week): list(dict_merge.values())}, index=list(dict_merge.keys()))
                df_return = pd.merge(df_return, df_new, how='outer', left_index=True, right_index=True)
        except:
            # HACK: bare except doubles as the loop exit — return whatever
            # has been accumulated so far (raises NameError if week 4
            # itself failed, since df_return is then unbound).
            return df_return
c_ids = [df.Cliente_ID.values[int(i)] for i in np.linspace(0, len(df) - 1, 5)]
c_ids | code |
329572/cell_5 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
types = {'Semana': np.uint8, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint32}
df = pd.read_csv('../input/train.csv', usecols=types.keys(), dtype=types)
df.Demanda_uni_equil.hist(bins=100)
plt.xlabel('Demand per week')
plt.ylabel('Number of clients') | code |
2026814/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min) | code |
2026814/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from tqdm import tqdm | code |
2026814/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min)
fig, ax = plt.subplots(2, 4, figsize=[12, 8])
ax[0, 0].imshow(X_train[0, :, :, 0])
ax[0, 1].imshow(X_train[0, :, :, 2])
ax[0, 2].imshow(X_train[2, :, :, 0])
ax[0, 3].imshow(X_train[2, :, :, 2])
ax[1, 0].imshow(X_train[1, :, :, 0])
ax[1, 1].imshow(X_train[1, :, :, 2])
ax[1, 2].imshow(X_train[6, :, :, 0])
ax[1, 3].imshow(X_train[6, :, :, 2]) | code |
2026814/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
lbl = OneHotEncoder()
lbl.fit([[0], [1]])
y = lbl.transform(y).toarray() | code |
2026814/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
lbl = OneHotEncoder()
lbl.fit([[0], [1]])
y = lbl.transform(y).toarray()
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min)
def get_batches(x, y, batch_size=10):
    """Yield aligned (X, Y) mini-batches from parallel sequences.

    All batches hold ``batch_size`` samples except the last, which
    absorbs any remainder.  Fixes a defect in the original: when
    ``len(x) < batch_size`` the old code computed ``n_batches == 0`` and
    yielded nothing, silently dropping the data; a single short batch is
    now yielded instead.  Behavior for ``len(x) >= batch_size`` and for
    empty input is unchanged.

    Parameters
    ----------
    x, y : sliceable sequences/arrays of equal length (features, labels).
    batch_size : int, default 10
        Number of samples per batch.

    Yields
    ------
    tuple
        ``(X, Y)`` slices of *x* and *y*.
    """
    if len(x) == 0:
        return  # nothing to yield for empty input
    # At least one batch so inputs shorter than batch_size still come through.
    n_batches = max(len(x) // batch_size, 1)
    for ii in range(0, n_batches * batch_size, batch_size):
        if ii != (n_batches - 1) * batch_size:
            X, Y = (x[ii:ii + batch_size], y[ii:ii + batch_size])
        else:
            # Last batch takes everything remaining (may be larger or
            # smaller than batch_size).
            X, Y = (x[ii:], y[ii:])
        yield (X, Y)
inputs = tf.placeholder(tf.float32, [None, 75, 75, 3])
labels = tf.placeholder(tf.int32)
conv1 = tf.layers.conv2d(inputs=inputs, filters=8, kernel_size=(7, 7), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=(5, 5), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv3 = tf.layers.conv2d(inputs=pool2, filters=16, kernel_size=(3, 3), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=(2, 2), padding='SAME')
flat = tf.reshape(pool3, [-1, 1600])
fc1 = tf.layers.dense(flat, units=256, use_bias=True, activation=tf.nn.relu)
dp1 = tf.layers.dropout(fc1, rate=0.25)
fc2 = tf.layers.dense(dp1, units=64, use_bias=True, activation=tf.nn.relu)
dp2 = tf.layers.dropout(fc2, rate=0.25)
logits = tf.layers.dense(dp2, units=2, use_bias=True)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.cast(labels, tf.float32))
cost = tf.reduce_mean(loss)
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
n_epoches = 100
batch_size = 32
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoches):
for X_batch, y_batch in get_batches(train_data, train_label, batch_size):
feed_dict = {inputs: X_batch, labels: y_batch}
train_cost, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
feed_dict = {inputs: X_train, labels: y}
train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
feed_dict = {inputs: val_data, labels: val_label}
val_accuracy = sess.run(accuracy, feed_dict=feed_dict)
saver.save(sess, 'checkpoints/cnn_100.ckpt')
test_batch_size = 128
test_pred_res = []
with tf.Session() as sess:
saver.restore(sess, 'checkpoints/cnn_100.ckpt')
for i in tqdm(range(0, X_test.shape[0], test_batch_size)):
test_batch = X_test[i:i + test_batch_size, :, :, :]
feed_dict = {inputs: test_batch}
test_pred = sess.run(predicted, feed_dict=feed_dict)
test_pred_res.append(test_pred.tolist())
test_pred_res = np.concatenate(test_pred_res)
cnn_submit = submission.copy()
cnn_submit.is_iceberg = test_pred_res[:, 1]
cnn_submit.to_csv('./cnn_100_submit.csv') | code |
2026814/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
lbl = OneHotEncoder()
lbl.fit([[0], [1]])
y = lbl.transform(y).toarray()
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min)
train_data, train_label = (X_train[:1400, :, :, :], y[:1400, :])
val_data, val_label = (X_train[1400:, :, :, :], y[1400:, :]) | code |
2026814/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
import tensorflow as tf
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
lbl = OneHotEncoder()
lbl.fit([[0], [1]])
y = lbl.transform(y).toarray()
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min)
def get_batches(x, y, batch_size=10):
n_batches = len(x) // batch_size
for ii in range(0, n_batches * batch_size, batch_size):
if ii != (n_batches - 1) * batch_size:
X, Y = (x[ii:ii + batch_size], y[ii:ii + batch_size])
else:
X, Y = (x[ii:], y[ii:])
yield (X, Y)
inputs = tf.placeholder(tf.float32, [None, 75, 75, 3])
labels = tf.placeholder(tf.int32)
conv1 = tf.layers.conv2d(inputs=inputs, filters=8, kernel_size=(7, 7), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=(5, 5), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv3 = tf.layers.conv2d(inputs=pool2, filters=16, kernel_size=(3, 3), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=(2, 2), padding='SAME')
flat = tf.reshape(pool3, [-1, 1600])
fc1 = tf.layers.dense(flat, units=256, use_bias=True, activation=tf.nn.relu)
dp1 = tf.layers.dropout(fc1, rate=0.25)
fc2 = tf.layers.dense(dp1, units=64, use_bias=True, activation=tf.nn.relu)
dp2 = tf.layers.dropout(fc2, rate=0.25)
logits = tf.layers.dense(dp2, units=2, use_bias=True)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.cast(labels, tf.float32))
cost = tf.reduce_mean(loss)
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
n_epoches = 100
batch_size = 32
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoches):
for X_batch, y_batch in get_batches(train_data, train_label, batch_size):
feed_dict = {inputs: X_batch, labels: y_batch}
train_cost, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
feed_dict = {inputs: X_train, labels: y}
train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
feed_dict = {inputs: val_data, labels: val_label}
val_accuracy = sess.run(accuracy, feed_dict=feed_dict)
print('epoch {}, train accuracy: {:5f}, validation accuracy: {:.5f}'.format(epoch + 1, train_accuracy, val_accuracy))
saver.save(sess, 'checkpoints/cnn_100.ckpt') | code |
2026814/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id') | code |
2026814/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
lbl = OneHotEncoder()
lbl.fit([[0], [1]])
y = lbl.transform(y).toarray()
X_min = np.min(X_train)
X_max = np.max(X_train)
X_train = (X_train - X_min) / (X_max - X_min)
X_test = (X_test - X_min) / (X_max - X_min)
def get_batches(x, y, batch_size=10):
n_batches = len(x) // batch_size
for ii in range(0, n_batches * batch_size, batch_size):
if ii != (n_batches - 1) * batch_size:
X, Y = (x[ii:ii + batch_size], y[ii:ii + batch_size])
else:
X, Y = (x[ii:], y[ii:])
yield (X, Y)
inputs = tf.placeholder(tf.float32, [None, 75, 75, 3])
labels = tf.placeholder(tf.int32)
conv1 = tf.layers.conv2d(inputs=inputs, filters=8, kernel_size=(7, 7), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=(5, 5), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=(2, 2), padding='SAME')
conv3 = tf.layers.conv2d(inputs=pool2, filters=16, kernel_size=(3, 3), strides=(1, 1), padding='SAME', activation=tf.nn.relu, use_bias=True)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=(2, 2), padding='SAME')
flat = tf.reshape(pool3, [-1, 1600])
fc1 = tf.layers.dense(flat, units=256, use_bias=True, activation=tf.nn.relu)
dp1 = tf.layers.dropout(fc1, rate=0.25)
fc2 = tf.layers.dense(dp1, units=64, use_bias=True, activation=tf.nn.relu)
dp2 = tf.layers.dropout(fc2, rate=0.25)
logits = tf.layers.dense(dp2, units=2, use_bias=True)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.cast(labels, tf.float32))
cost = tf.reduce_mean(loss)
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
n_epoches = 100
batch_size = 32
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoches):
for X_batch, y_batch in get_batches(train_data, train_label, batch_size):
feed_dict = {inputs: X_batch, labels: y_batch}
train_cost, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
feed_dict = {inputs: X_train, labels: y}
train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
feed_dict = {inputs: val_data, labels: val_label}
val_accuracy = sess.run(accuracy, feed_dict=feed_dict)
saver.save(sess, 'checkpoints/cnn_100.ckpt')
test_batch_size = 128
test_pred_res = []
with tf.Session() as sess:
saver.restore(sess, 'checkpoints/cnn_100.ckpt')
for i in tqdm(range(0, X_test.shape[0], test_batch_size)):
test_batch = X_test[i:i + test_batch_size, :, :, :]
feed_dict = {inputs: test_batch}
test_pred = sess.run(predicted, feed_dict=feed_dict)
test_pred_res.append(test_pred.tolist())
test_pred_res = np.concatenate(test_pred_res) | code |
2026814/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
data_path = './data'
train = pd.read_json(data_path + '/' + 'train.json')
test = pd.read_json(data_path + '/' + 'test.json')
submission = pd.read_csv(data_path + '/' + 'sample_submission.csv').set_index('id')
train_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_1])
train_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in train.band_2])
X_train = np.concatenate([train_band_1[:, :, :, np.newaxis], train_band_2[:, :, :, np.newaxis], ((train_band_1 + train_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
test_band_1 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_1])
test_band_2 = np.array([np.array(band).astype(np.float32).reshape((75, 75)) for band in test.band_2])
X_test = np.concatenate([test_band_1[:, :, :, np.newaxis], test_band_2[:, :, :, np.newaxis], ((test_band_1 + test_band_2) / 2)[:, :, :, np.newaxis]], axis=-1)
y = np.array([target for target in train.is_iceberg]).reshape((-1, 1))
print('train_band_1 shape: {}'.format(train_band_1.shape))
print('train_band_2 shape: {}'.format(train_band_2.shape))
print('train_train shape: {}'.format(X_train.shape))
print('train label shape: {}'.format(y.shape))
print('test_band_1 shape: {}'.format(test_band_1.shape))
print('test_band_2 shape: {}'.format(test_band_2.shape))
print('test_train shape: {}'.format(X_test.shape)) | code |
105180335/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('The data in Groceries is:', groceries.values)
print('The index of Groceries is:', groceries.index) | code |
105180335/cell_25 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import numpy as np
print('Original grocery list of fruits:\n', fruits)
print()
print('EXP(X) = \n', np.exp(fruits))
print()
print('SQRT(X) =\n', np.sqrt(fruits))
print()
print('POW(X,2) =\n', np.power(fruits, 2)) | code |
105180335/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
print('shopping_carts has shape:', shopping_carts.shape)
print('shopping_carts has dimension:', shopping_carts.ndim)
print('shopping_carts has a total of:', shopping_carts.size, 'elements')
print()
print('The data in shopping_carts is:\n', shopping_carts.values)
print()
print('The row index in shopping_carts is:', shopping_carts.index)
print()
print('The column index in shopping_carts is:', shopping_carts.columns) | code |
105180335/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
print('Original grocery list of fruits:\n ', fruits)
print()
print('fruits + 2:\n', fruits + 2)
print()
print('fruits - 2:\n', fruits - 2)
print()
print('fruits * 2:\n', fruits * 2)
print()
print('fruits / 2:\n', fruits / 2)
print() | code |
105180335/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts | code |
105180335/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('Original Grocery List:\n', groceries)
groceries.drop('apples', inplace=True)
print()
print('Grocery List after removing apples in place:\n', groceries) | code |
105180335/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
print(type(items)) | code |
105180335/cell_39 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df | code |
105180335/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
print('Original grocery list of fruits:\n ', fruits)
print()
print('Amount of bananas + 2 = ', fruits['bananas'] + 2)
print()
print('Amount of apples - 2 = ', fruits.iloc[0] - 2)
print()
print('We double the amount of apples and oranges:\n', fruits[['apples', 'oranges']] * 2)
print()
print('We half the amount of apples and oranges:\n', fruits.loc[['apples', 'oranges']] / 2) | code |
105180335/cell_48 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
store_items.insert(4, 'shoes', [8, 5])
store_items | code |
105180335/cell_41 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items | code |
105180335/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('The data in Groceries is:', groceries.values)
print('checking if we have eggs in the groceies:', 'egg' in groceries)
print('checking if we have bananas in the groceies:', 'bananas' in groceries) | code |
105180335/cell_50 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
new_items = [{'bikes': 20, 'pants': 30, 'watches': 35, 'glasses': 4}]
new_store = pd.DataFrame(new_items, index=['store 3'])
new_store | code |
105180335/cell_45 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
store_items['shirts'] = [15, 2]
store_items | code |
105180335/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('Original Grocery List:\n', groceries)
print()
print('We remove apples (out of place):\n', groceries.drop('apples'))
print()
print('Grocery List after removing apples out of place:\n', groceries) | code |
105180335/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df | code |
105180335/cell_51 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
store_items.insert(4, 'shoes', [8, 5])
store_items
new_items = [{'bikes': 20, 'pants': 30, 'watches': 35, 'glasses': 4}]
new_store = pd.DataFrame(new_items, index=['store 3'])
new_store
store_items = store_items.append(new_store)
store_items | code |
105180335/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('Groceries has shape:', groceries.shape)
print('Groceries has dimension:', groceries.ndim)
print('Groceries has a total of', groceries.size, 'elements') | code |
105180335/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('Original Grocery List:\n', groceries)
groceries['eggs'] = 2
print()
print('Modified Grocery List:\n', groceries) | code |
105180335/cell_38 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart | code |
105180335/cell_47 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
store_items['new watches'] = store_items['watches'][1:]
store_items | code |
105180335/cell_43 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
print()
print('How many bikes are in each store:\n', store_items[['bikes']])
print()
print('How many bikes and pants are in each store:\n', store_items[['bikes', 'pants']])
print()
print('What items are in Store 1:\n', store_items.loc[['store 1']])
print()
print('How many bikes are in Store 2:', store_items['bikes']['store 2']) | code |
105180335/cell_46 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart
alice_sel_shopping_cart = pd.DataFrame(items, index=['glasses', 'bike'], columns=['Alice'])
alice_sel_shopping_cart
data = {'Integers': [1, 2, 3], 'Floats': [4.5, 8.2, 9.6]}
df = pd.DataFrame(data)
df
items = [{'bikes': 15, 'pants': 20, 'watches': 35}, {'bikes': 12, 'pants': 30, 'watches': 40, 'glass': 10}]
store_items = pd.DataFrame(items, index=['store 1', 'store 2'])
store_items
store_items['suits'] = store_items['pants'] + store_items['shirts']
store_items | code |
105180335/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
print('How many eggs do we need to buy:', groceries['eggs'])
print()
print('Do we need milk and bread:\n', groceries[['milk', 'bread']])
print()
print('How many eggs and apples do we need to buy:\n', groceries.loc[['eggs', 'apples']])
print()
print('How many eggs and apples do we need to buy:\n', groceries[[0, 1]])
print()
print('Do we need bread:\n', groceries[-1])
print()
print('How many eggs do we need to buy:', groceries[0])
print()
print('Do we need milk and bread:\n', groceries.iloc[[2, 3]]) | code |
105180335/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits | code |
105180335/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart
sel_shopping_cart = pd.DataFrame(items, index=['pants', 'book'])
sel_shopping_cart | code |
105180335/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries | code |
105180335/cell_36 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
groceries = pd.Series(data=[20, 6, 'Yes', 'No'], index=['eggs', 'apples', 'milk', 'bread'])
groceries
fruits = pd.Series(data=[10, 6, 3], index=['apples', 'oranges', 'bananas'])
fruits
import pandas as pd
items = {'Bob': pd.Series(data=[245, 25, 55], index=['bike', 'pants', 'watch']), 'Alice': pd.Series(data=[40, 110, 500, 45], index=['book', 'glasses', 'bike', 'pants'])}
shopping_carts = pd.DataFrame(items)
shopping_carts
data = {'Bob': pd.Series([245, 25, 55]), 'Alice': pd.Series([40, 110, 500, 45])}
df = pd.DataFrame(data)
df
bob_shopping_cart = pd.DataFrame(items, columns=['Bob'])
bob_shopping_cart | code |
128045003/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks
top_loudest_tracks.plot.barh()
plt.show() | code |
128045003/cell_9 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
ds.describe() | code |
128045003/cell_4 | [
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.info() | code |
128045003/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks
top_loudest_tracks.plot.barh()
top_dancable_songs = ds[['danceability', 'artists', 'name']].sort_values(by='danceability', ascending=False)[:5]
top_dancable_songs
top_instrumental_songs = ds[['instrumentalness', 'artists', 'name']].dropna(subset=['artists']).sort_values(by='instrumentalness', ascending=False)[:5]
top_instrumental_songs
interest_feature_cols = ['loudness', 'acousticness', 'danceability', 'duration_ms', 'energy', 'instrumentalness', 'liveness', 'speechiness']
for feature_cols in interest_feature_cols:
pos_data = ds[ds['mode'] == 1][feature_cols]
neg_data = ds[ds['mode'] == 0][feature_cols]
pos_data = pd.melt(pos_data.to_frame(), value_name=f'{feature_cols}_pos')
neg_data = pd.melt(neg_data.to_frame(), value_name=f'{feature_cols}_neg')
plt.figure(figsize=(12, 7))
sns.histplot(data=pos_data, x=f'{feature_cols}_pos', bins=40, color='green', alpha=0.5, label='Positive')
sns.histplot(data=neg_data, x=f'{feature_cols}_neg', bins=40, color='red', alpha=0.5, label='Negative')
plt.legend(loc='upper right')
plt.title(f'Positive and Negative Histogram for {feature_cols}')
plt.show() | code |
128045003/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns | code |
128045003/cell_2 | [
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds | code |
128045003/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
plt.show() | code |
128045003/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns) | code |
128045003/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks
top_loudest_tracks.plot.barh()
top_dancable_songs = ds[['danceability', 'artists', 'name']].sort_values(by='danceability', ascending=False)[:5]
top_dancable_songs
top_instrumental_songs = ds[['instrumentalness', 'artists', 'name']].dropna(subset=['artists']).sort_values(by='instrumentalness', ascending=False)[:5]
top_instrumental_songs
plt.figure(figsize=(12, 7))
plt.pie(x='instrumentalness', autopct='%1.2f%%', data=top_instrumental_songs, labels=top_instrumental_songs.artists)
plt.title('Top 5 Instrumental Tracks by Genre')
plt.show() | code |
128045003/cell_8 | [
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
ds.head() | code |
128045003/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_dancable_songs = ds[['danceability', 'artists', 'name']].sort_values(by='danceability', ascending=False)[:5]
top_dancable_songs | code |
128045003/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks
top_loudest_tracks.plot.barh()
top_dancable_songs = ds[['danceability', 'artists', 'name']].sort_values(by='danceability', ascending=False)[:5]
top_dancable_songs
plt.figure(figsize=(12, 7))
sns.catplot(x='danceability', y='artists', data=top_dancable_songs, kind='bar', height=10, aspect=1.5)
plt.title('Top 5 Dancable Tracks')
plt.show() | code |
128045003/cell_3 | [
"image_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum() | code |
128045003/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_instrumental_songs = ds[['instrumentalness', 'artists', 'name']].dropna(subset=['artists']).sort_values(by='instrumentalness', ascending=False)[:5]
top_instrumental_songs | code |
128045003/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_ten_genre.plot.barh()
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks
top_loudest_tracks.plot.barh()
plt.figure(figsize=(12, 7))
sns.barplot(x='loudness', y='artists', data=top_loudest_tracks)
plt.title('Top 5 loudest Tracks')
plt.show() | code |
128045003/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre | code |
128045003/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape
ds.columns
len(ds.columns)
top_ten_genre = ds.groupby('artists').count().sort_values(by='name', ascending=False)['name'][:10]
top_ten_genre
top_loudest_tracks = ds[['loudness', 'artists']].sort_values(by='loudness', ascending=False)[:5]
top_loudest_tracks | code |
128045003/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
ds = pd.read_csv('/kaggle/input/spotify-datacsv/spotify_data.csv', dtype={'19': float})
ds
ds.isna().sum()
ds.shape | code |
50219835/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50219835/cell_8 | [
"text_plain_output_1.png"
] | from pytorch_tabnet.tab_model import TabNetClassifier
from pytorch_tabnet.tab_model import TabNetClassifier
clf = TabNetClassifier()
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)], max_epochs=2) | code |
50219835/cell_3 | [
"text_plain_output_1.png"
] | !pip install pytorch-tabnet | code |
18140562/cell_21 | [
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBClassifier
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()
X = df_train['STORY']
y = df_train['SECTION']
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), stop_words=stop_words)
tfidf_vec.fit(X)
x_vec = tfidf_vec.transform(X)
svc = LinearSVC(C=10.0)
svc = LinearSVC(C=10.0)
svc.fit(x_train_vec, y_train)
y_preds = svc.predict(x_eval_vec)
accuracy_score(y_preds, y_eval) | code |
18140562/cell_25 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBClassifier
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()
X = df_train['STORY']
y = df_train['SECTION']
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), stop_words=stop_words)
tfidf_vec.fit(X)
x_vec = tfidf_vec.transform(X)
svc = LinearSVC(C=10.0)
svc = LinearSVC(C=10.0)
svc.fit(x_train_vec, y_train)
y_preds = svc.predict(x_eval_vec)
svc = LinearSVC(C=10.0)
svc.fit(x_vec, y) | code |
18140562/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5) | code |
18140562/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum() | code |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.