path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
89135215/cell_15 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
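# Each Numlist groups columns that share one ordinal scale; the matching
# numeric_map function below encodes that scale as integers, mapping NaN
# (feature absent, e.g. no basement or garage) to 0 where the data
# dictionary allows missing values.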
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].abs().sort_values(ascending=False)
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
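# Keep only features whose absolute correlation with SalePrice is at least
# 0.05; 'Id' is removed from the drop list because x already excludes it.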
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new['1stFlrSF'].hist(bins=50)
plt.show() | code |
89135215/cell_16 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].abs().sort_values(ascending=False)
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new['BsmtUnfSF'].hist(bins=50)
plt.show() | code |
89135215/cell_17 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].abs().sort_values(ascending=False)
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new['GrLivArea'].hist(bins=50)
plt.show() | code |
89135215/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].abs().sort_values(ascending=False)
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new['TotalBsmtSF'].hist(bins=50)
plt.show() | code |
89135215/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].abs().sort_values(ascending=False)
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new.info() | code |
72076990/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
test.isna().sum()[test.isna().sum() > 0]
for col in cat_cols:
print('***' + col + '***')
print('Number of unique cat:', test[col].nunique())
print(test[col].value_counts()) | code |
72076990/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
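# Outlier pass 1: drop rows whose target is more than 3 standard
# deviations from the mean.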
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
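# Outlier pass 2: drop rows outside the 1.5 * IQR fences of the target.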
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape | code |
72076990/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
train['target'].hist() | code |
72076990/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(f'Train Shape: {train.shape}\nTest Shape: {test.shape}') | code |
72076990/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train.isna().sum()[train.isna().sum() > 0]
for col in cat_cols:
print('***' + col + '***')
print('Number of unique cat:', train[col].nunique())
print(train[col].value_counts()) | code |
72076990/cell_6 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols | code |
72076990/cell_2 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
plt.style.use('ggplot') | code |
72076990/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train.isna().sum()[train.isna().sum() > 0]
train[cat_cols].sample(5) | code |
72076990/cell_7 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols | code |
72076990/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
train['target'].describe() | code |
72076990/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train.isna().sum()[train.isna().sum() > 0] | code |
72076990/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
test.isna().sum()[test.isna().sum() > 0] | code |
72076990/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train.isna().sum()[train.isna().sum() > 0]
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(columns=['target']), train['target'].values, test_size=0.1, random_state=42)
(X_train.shape, X_valid.shape)
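# Baseline: a deliberately shallow random forest (depth 6, at most 5 leaf
# nodes per tree), scored by RMSE on the 10% hold-out split.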
rf = RandomForestRegressor(random_state=42, max_depth=6, max_leaf_nodes=5, n_jobs=-1)
rf.fit(X_train.drop(columns=['id']), y_train)
preds_rf = rf.predict(X_valid.drop(columns=['id']))
print('RMSE:', mean_squared_error(y_valid, preds_rf, squared=False)) | code |
72076990/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train['target'].hist() | code |
72076990/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train.isna().sum()[train.isna().sum() > 0]
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(columns=['target']), train['target'].values, test_size=0.1, random_state=42)
(X_train.shape, X_valid.shape) | code |
72076990/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
train['target'].hist() | code |
72076990/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes | code |
72081792/cell_9 | [
"text_plain_output_1.png"
] | from tqdm.auto import tqdm
from transformers import TFAutoModel, AutoTokenizer
import pandas as pd
import pathlib
import tensorflow as tf
ROOT_PATH = pathlib.Path('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/')
MODEL = 'distilbert-base-multilingual-cased'
BATCH_SIZE = 32
EPOCHS = 1
MAX_DOC_LENGTH = 256
train_df = pd.read_csv(ROOT_PATH / 'jigsaw-toxic-comment-train.csv')
valid_df = pd.read_csv(ROOT_PATH / 'validation.csv')
test_df = pd.read_csv(ROOT_PATH / 'test.csv').rename(columns={'content': 'comment_text'})
train_df.sample(5)
class Tokenizer:
def __init__(self, tokenizer, max_doc_length: int, padding=True) -> None:
self.tokenizer = tokenizer
self.max_doc_length = max_doc_length
self.padding = padding
def __call__(self, x):
return self.tokenizer(x, max_length=self.max_doc_length, truncation=True, padding=self.padding, return_tensors='tf')
tokenizer = Tokenizer(AutoTokenizer.from_pretrained(MODEL), MAX_DOC_LENGTH)
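# Tokenize the corpus in chunks of BATCH_SIZE * 4 documents to bound peak
# memory, then concatenate the per-chunk tensors along the batch axis.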
def get_tokenized_values(text, tokenizer, batch_size):
input_ids = []
attention_mask = []
for i in tqdm(range(0, len(text), batch_size)):
tokenized_batch = tokenizer(text[i:i + batch_size])
input_ids.append(tokenized_batch['input_ids'])
attention_mask.append(tokenized_batch['attention_mask'])
return (tf.concat(input_ids, axis=0), tf.concat(attention_mask, axis=0))
train_input_ids, train_attention_mask = get_tokenized_values(train_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
valid_input_ids, valid_attention_mask = get_tokenized_values(valid_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
test_input_ids, test_attention_mask = get_tokenized_values(test_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
y_train = train_df.toxic.values
y_valid = valid_df.toxic.values | code |
72081792/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pathlib
ROOT_PATH = pathlib.Path('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/')
MODEL = 'distilbert-base-multilingual-cased'
BATCH_SIZE = 32
EPOCHS = 1
MAX_DOC_LENGTH = 256
train_df = pd.read_csv(ROOT_PATH / 'jigsaw-toxic-comment-train.csv')
valid_df = pd.read_csv(ROOT_PATH / 'validation.csv')
test_df = pd.read_csv(ROOT_PATH / 'test.csv').rename(columns={'content': 'comment_text'})
train_df.sample(5) | code |
72081792/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pathlib
ROOT_PATH = pathlib.Path('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/')
MODEL = 'distilbert-base-multilingual-cased'
BATCH_SIZE = 32
EPOCHS = 1
MAX_DOC_LENGTH = 256
train_df = pd.read_csv(ROOT_PATH / 'jigsaw-toxic-comment-train.csv')
valid_df = pd.read_csv(ROOT_PATH / 'validation.csv')
test_df = pd.read_csv(ROOT_PATH / 'test.csv').rename(columns={'content': 'comment_text'})
train_df.sample(5)
(train_df['toxic'].mean(), valid_df['toxic'].mean()) | code |
72081792/cell_15 | [
"text_plain_output_1.png"
] | from tqdm.auto import tqdm
from transformers import TFAutoModel, AutoTokenizer
import pandas as pd
import pathlib
import tensorflow as tf
import tensorflow.keras as keras
ROOT_PATH = pathlib.Path('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/')
MODEL = 'distilbert-base-multilingual-cased'
BATCH_SIZE = 32
EPOCHS = 1
MAX_DOC_LENGTH = 256
train_df = pd.read_csv(ROOT_PATH / 'jigsaw-toxic-comment-train.csv')
valid_df = pd.read_csv(ROOT_PATH / 'validation.csv')
test_df = pd.read_csv(ROOT_PATH / 'test.csv').rename(columns={'content': 'comment_text'})
train_df.sample(5)
class Tokenizer:
def __init__(self, tokenizer, max_doc_length: int, padding=True) -> None:
self.tokenizer = tokenizer
self.max_doc_length = max_doc_length
self.padding = padding
def __call__(self, x):
return self.tokenizer(x, max_length=self.max_doc_length, truncation=True, padding=self.padding, return_tensors='tf')
tokenizer = Tokenizer(AutoTokenizer.from_pretrained(MODEL), MAX_DOC_LENGTH)
def get_tokenized_values(text, tokenizer, batch_size):
input_ids = []
attention_mask = []
for i in tqdm(range(0, len(text), batch_size)):
tokenized_batch = tokenizer(text[i:i + batch_size])
input_ids.append(tokenized_batch['input_ids'])
attention_mask.append(tokenized_batch['attention_mask'])
return (tf.concat(input_ids, axis=0), tf.concat(attention_mask, axis=0))
train_input_ids, train_attention_mask = get_tokenized_values(train_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
valid_input_ids, valid_attention_mask = get_tokenized_values(valid_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
test_input_ids, test_attention_mask = get_tokenized_values(test_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
y_train = train_df.toxic.values
y_valid = valid_df.toxic.values
train_dataset = tf.data.Dataset.from_tensor_slices(((train_input_ids, train_attention_mask), y_train)).repeat().shuffle(2048).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
valid_dataset = tf.data.Dataset.from_tensor_slices(((valid_input_ids, valid_attention_mask), y_valid)).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
test_dataset = tf.data.Dataset.from_tensor_slices((test_input_ids, test_attention_mask)).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
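# Classification head: take DistilBERT's final hidden state at the [CLS]
# position and feed it to a single sigmoid unit for binary toxicity.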
bert_model = TFAutoModel.from_pretrained(MODEL)
input_ids = keras.layers.Input(shape=(MAX_DOC_LENGTH,), dtype=tf.int32)
attention_mask = keras.layers.Input(shape=(MAX_DOC_LENGTH,), dtype=tf.int32)
sequence_output = bert_model(input_ids, attention_mask)[0]
cls_token = sequence_output[:, 0, :]
out = keras.layers.Dense(1, activation='sigmoid')(cls_token)
model = keras.models.Model(inputs=(input_ids, attention_mask), outputs=out)
model.compile(keras.optimizers.Adam(lr=1e-05), loss='binary_crossentropy', metrics=['accuracy', keras.metrics.AUC()])
model.summary() | code |
72081792/cell_17 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tqdm.auto import tqdm
from transformers import TFAutoModel, AutoTokenizer
import pandas as pd
import pathlib
import tensorflow as tf
import tensorflow.keras as keras
ROOT_PATH = pathlib.Path('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/')
MODEL = 'distilbert-base-multilingual-cased'
BATCH_SIZE = 32
EPOCHS = 1
MAX_DOC_LENGTH = 256
train_df = pd.read_csv(ROOT_PATH / 'jigsaw-toxic-comment-train.csv')
valid_df = pd.read_csv(ROOT_PATH / 'validation.csv')
test_df = pd.read_csv(ROOT_PATH / 'test.csv').rename(columns={'content': 'comment_text'})
train_df.sample(5)
class Tokenizer:
def __init__(self, tokenizer, max_doc_length: int, padding=True) -> None:
self.tokenizer = tokenizer
self.max_doc_length = max_doc_length
self.padding = padding
def __call__(self, x):
return self.tokenizer(x, max_length=self.max_doc_length, truncation=True, padding=self.padding, return_tensors='tf')
tokenizer = Tokenizer(AutoTokenizer.from_pretrained(MODEL), MAX_DOC_LENGTH)
def get_tokenized_values(text, tokenizer, batch_size):
input_ids = []
attention_mask = []
for i in tqdm(range(0, len(text), batch_size)):
tokenized_batch = tokenizer(text[i:i + batch_size])
input_ids.append(tokenized_batch['input_ids'])
attention_mask.append(tokenized_batch['attention_mask'])
return (tf.concat(input_ids, axis=0), tf.concat(attention_mask, axis=0))
train_input_ids, train_attention_mask = get_tokenized_values(train_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
valid_input_ids, valid_attention_mask = get_tokenized_values(valid_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
test_input_ids, test_attention_mask = get_tokenized_values(test_df['comment_text'].values.tolist(), tokenizer, BATCH_SIZE * 4)
y_train = train_df.toxic.values
y_valid = valid_df.toxic.values
train_dataset = tf.data.Dataset.from_tensor_slices(((train_input_ids, train_attention_mask), y_train)).repeat().shuffle(2048).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
valid_dataset = tf.data.Dataset.from_tensor_slices(((valid_input_ids, valid_attention_mask), y_valid)).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
test_dataset = tf.data.Dataset.from_tensor_slices((test_input_ids, test_attention_mask)).batch(BATCH_SIZE).prefetch(BATCH_SIZE * 2)
bert_model = TFAutoModel.from_pretrained(MODEL)
input_ids = keras.layers.Input(shape=(MAX_DOC_LENGTH,), dtype=tf.int32)
attention_mask = keras.layers.Input(shape=(MAX_DOC_LENGTH,), dtype=tf.int32)
sequence_output = bert_model(input_ids, attention_mask)[0]
cls_token = sequence_output[:, 0, :]
out = keras.layers.Dense(1, activation='sigmoid')(cls_token)
model = keras.models.Model(inputs=(input_ids, attention_mask), outputs=out)
model.compile(keras.optimizers.Adam(lr=1e-05), loss='binary_crossentropy', metrics=['accuracy', keras.metrics.AUC()])
model.summary()
n_steps = train_input_ids.shape[0] // BATCH_SIZE
train_history = model.fit(train_dataset, steps_per_epoch=n_steps, validation_data=valid_dataset, epochs=EPOCHS) | code |
72081792/cell_12 | [
"text_html_output_1.png"
] | (x[0].shape, x[1].shape) | code |
128026857/cell_13 | [
"image_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseases_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
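# For each symptom column: the share of patients with each prognosis who
# report that symptom, and a z-score of that share across the prognoses.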
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
print(f'{red}{col_name}:{res}')
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
print(weights.sort_values(by='z-score'), '\n')
plt.figure(figsize=(3, 2))
sns.kdeplot(weights['percentage'], shade=True)
plt.title(f'Density Plot of {col_name}')
plt.xlabel('Percentage')
plt.ylabel('Density')
plt.show() | code |
128026857/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
df_train.head() | code |
128026857/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns | code |
128026857/cell_29 | [
"text_plain_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseases_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_train.shape
prognosis_list = df_train.prognosis.unique()
train_cluster_cols = df_train.filter(regex='cluster')
test_cluster_cols = df_test.filter(regex='cluster')
cluster_cols = list(train_cluster_cols.columns)
test_cluster_cols.head() | code |
128026857/cell_26 | [
"text_plain_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseases_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_train.shape
prognosis_list = df_train.prognosis.unique()
df_train.head() | code |
128026857/cell_11 | [
"text_plain_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseases_important_symptoms = {}
for group_name, group_df in disease_dfs.items():
print(f'{blu}{group_name}{res}:')
print(round(group_df.drop(columns=['prognosis']).sum().sort_values(ascending=False)[:5] / len(group_df), 2), '\n') | code |
128026857/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
plt.figure(figsize=(16, 4))
marginals = train.groupby('prognosis').mean()
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
plt.show() | code |
128026857/cell_15 | [
"text_plain_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseses_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
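# For each symptom column, compute per-disease prevalence and its z-score across the 11 diseases.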
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_z = pd.DataFrame(z_scores)
plt.figure(figsize=(16, 4))
plt.imshow(df_z, cmap='coolwarm')
plt.xticks(range(64), df_z.columns, rotation=90)
plt.yticks(range(11), df_z.index)
plt.colorbar()
plt.show() | code |
128026857/cell_31 | [
"text_html_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseses_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_train.shape
prognosis_list = df_train.prognosis.unique()
train_cluster_cols = df_train.filter(like='cluster')
test_cluster_cols = df_test.filter(like='cluster')
train_cluster_cols = df_train.filter(regex='cluster')
test_cluster_cols = df_test.filter(regex='cluster')
cluster_cols = list(train_cluster_cols.columns)
prognosis_dict = {df_train.prognosis.unique()[i]: i for i in range(11)}
df_train['prognosis_encoding'] = df_train['prognosis'].apply(lambda x: prognosis_dict[x])
df_train.head() | code |
128026857/cell_14 | [
"text_html_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseses_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_train.shape | code |
128026857/cell_22 | [
"image_output_11.png",
"text_plain_output_56.png",
"text_plain_output_35.png",
"image_output_24.png",
"text_plain_output_43.png",
"image_output_46.png",
"text_plain_output_37.png",
"image_output_25.png",
"text_plain_output_5.png",
"text_plain_output_48.png",
"text_plain_output_30.png",
"image_output_47.png",
"text_plain_output_15.png",
"image_output_17.png",
"image_output_30.png",
"text_plain_output_9.png",
"text_plain_output_44.png",
"image_output_14.png",
"image_output_59.png",
"image_output_39.png",
"text_plain_output_40.png",
"image_output_28.png",
"text_plain_output_31.png",
"text_plain_output_20.png",
"image_output_23.png",
"text_plain_output_60.png",
"image_output_34.png",
"image_output_64.png",
"text_plain_output_4.png",
"text_plain_output_64.png",
"text_plain_output_13.png",
"image_output_13.png",
"text_plain_output_52.png",
"text_plain_output_45.png",
"image_output_40.png",
"image_output_5.png",
"image_output_48.png",
"text_plain_output_14.png",
"image_output_18.png",
"text_plain_output_32.png",
"text_plain_output_29.png",
"image_output_58.png",
"text_plain_output_58.png",
"image_output_21.png",
"text_plain_output_49.png",
"text_plain_output_63.png",
"text_plain_output_27.png",
"image_output_52.png",
"text_plain_output_54.png",
"text_plain_output_10.png",
"image_output_60.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_57.png",
"image_output_62.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"image_output_56.png",
"image_output_31.png",
"text_plain_output_47.png",
"text_plain_output_25.png",
"image_output_20.png",
"text_plain_output_18.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"image_output_32.png",
"image_output_53.png",
"text_plain_output_3.png",
"image_output_4.png",
"image_output_51.png",
"text_plain_output_22.png",
"image_output_42.png",
"image_output_35.png",
"text_plain_output_38.png",
"image_output_41.png",
"image_output_57.png",
"text_plain_output_7.png",
"image_output_36.png",
"image_output_8.png",
"image_output_37.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_59.png",
"text_plain_output_8.png",
"text_plain_output_26.png",
"image_output_27.png",
"image_output_54.png",
"image_output_6.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"image_output_45.png",
"text_plain_output_42.png",
"image_output_63.png",
"text_plain_output_53.png",
"text_plain_output_23.png",
"text_plain_output_51.png",
"image_output_12.png",
"text_plain_output_28.png",
"image_output_22.png",
"text_plain_output_2.png",
"image_output_55.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_39.png",
"image_output_3.png",
"image_output_29.png",
"text_plain_output_55.png",
"image_output_44.png",
"image_output_43.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_33.png",
"text_plain_output_62.png",
"image_output_50.png",
"image_output_15.png",
"image_output_49.png",
"text_plain_output_61.png",
"image_output_9.png",
"image_output_19.png",
"image_output_61.png",
"image_output_38.png",
"image_output_26.png",
"text_plain_output_46.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseses_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_z = pd.DataFrame(z_scores)
plt.figure(figsize=(16, 4))
plt.imshow(df_z, cmap='coolwarm')
plt.xticks(range(64), df_z.columns, rotation=90)
plt.yticks(range(11), df_z.index)
plt.colorbar()
df_symptom = df_z.T
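# Note: this groupby expects a 'cluster' column on df_symptom, assigned by a clustering step (e.g. KMeans) not shown in this cell.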
symptom_cluster = df_symptom.groupby('cluster').groups
symptom_cluster | code |
128026857/cell_27 | [
"image_output_1.png"
] | from colorama import Style, Fore
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'axes.facecolor': '#F8F8F8', 'figure.facecolor': '#F8F8F8', 'axes.edgecolor': '#000000', 'grid.color': '#EBEBE7' + '30', 'font.family': 'serif', 'axes.labelcolor': '#000000', 'xtick.color': '#000000', 'ytick.color': '#000000', 'grid.alpha': 0.4}
sns.set(rc=rc)
palette = ['#302c36', '#037d97', '#91013E', '#C09741', '#EC5B6D', '#90A6B1', '#6ca957', '#D8E3E2']
from sklearn.ensemble import RandomForestClassifier
from vecstack import stacking
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss
from sklearn.model_selection import cross_val_score, train_test_split, RepeatedStratifiedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
from sklearn.naive_bayes import BernoulliNB
import plotly.express as px
import random
import os
from copy import deepcopy
from functools import partial
from itertools import combinations
import random
import gc
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoost, CatBoostClassifier
from catboost import Pool
from colorama import Style, Fore
blk = Style.BRIGHT + Fore.BLACK
mgt = Style.BRIGHT + Fore.MAGENTA
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
df_train = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
df_test = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
df_orig_train = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/trainn.csv')
df_orig_test = pd.read_csv('/kaggle/input/vector-borne-disease-prediction/testt.csv')
df_train.columns
primary_columns = list(df_train.drop(columns=['id', 'prognosis']).columns)
train = df_train.copy().drop(columns=['id'])
marginals = train.groupby('prognosis').mean()
plt.figure(figsize=(16, 4))
plt.imshow(marginals, cmap='hot')
plt.xticks(range(64), marginals.columns, rotation=90)
plt.yticks(range(11), marginals.index)
plt.colorbar()
disease_dfs = {group_name: group_df for group_name, group_df in df_train.drop(columns=['id']).groupby(by='prognosis')}
diseses_important_symptoms = {}
import seaborn as sns
disease_combined = df_train.drop(columns=['id']).groupby(by='prognosis').sum()
value_counts = df_train['prognosis'].value_counts()
z_scores = {}
df_high = pd.DataFrame()
for col_name in df_train.columns:
if col_name not in ['id', 'prognosis']:
top_diseases = disease_combined[col_name].sort_values(ascending=False)
top_df = top_diseases.reset_index()
top_df['percentage'] = top_df.apply(lambda row: round(row[col_name] / value_counts[row['prognosis']], 2), axis=1)
weights = top_df.drop(columns=[col_name])
weights['z-score'] = round((weights['percentage'] - weights['percentage'].mean()) / weights['percentage'].std(), 2)
z_scores[col_name] = {row['prognosis']: row['z-score'] for _, row in weights.iterrows()}
df_train.shape
prognosis_list = df_train.prognosis.unique()
train_cluster_cols = df_train.filter(like='cluster')
test_cluster_cols = df_test.filter(like='cluster')
train_cluster_cols.head() | code |
90131759/cell_16 | [
"text_plain_output_1.png"
from csv import reader
from numpy import mean
from numpy import std
from scipy.stats import norm
import pandas as pd
import random
def load_csv(filename):
dataset = list()
with open(filename, 'r') as file:
csv_reader = reader(file)
for row in csv_reader:
if not row:
continue
dataset.append(row)
return dataset
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
def separate_by_class(dataset):
separated = dict()
for i in range(len(dataset)):
vector = dataset[i]
class_value = vector[-1]
if class_value not in separated:
separated[class_value] = list()
separated[class_value].append(vector)
return separated
def splitDataset(dataset, ratio):
trainSize = int(len(dataset) * ratio)
trainSet = []
tempSet = list(dataset)
while len(trainSet) < trainSize:
index = random.randrange(len(tempSet))
trainSet.append(tempSet.pop(index))
return [trainSet, tempSet]
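# Fit a Gaussian (mean, std) to one feature column; used as the class-conditional likelihood.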
def fit_distribution(data):
mu = mean(data)
sigma = std(data)
dist = norm(mu, sigma)
return dist
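# Naive Bayes score: class prior times the product of the four per-feature Gaussian densities.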
def probability(X, prior, dist1, dist2, dist3, dist4):
return prior * dist1.pdf(X[0]) * dist2.pdf(X[1]) * dist3.pdf(X[2]) * dist4.pdf(X[3])
def predictedClass(data, prob_setosa, prob_versicolor, prob_virginica):
max_prob = [0] * len(data)
predicted = [''] * len(data)
for i in range(len(data)):
max_prob[i] = max(prob_setosa[i], prob_versicolor[i], prob_virginica[i])
if max_prob[i] == prob_setosa[i]:
predicted[i] = 'Iris-setosa'
elif max_prob[i] == prob_versicolor[i]:
predicted[i] = 'Iris-versicolor'
else:
predicted[i] = 'Iris-virginica'
return predicted
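# Accuracy via index-aligned comparison of the actual and predicted label columns.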
def accuracy(actual, predicted):
correct = 0
predicted['is_equal'] = actual['0'] == predicted['0']
correct = predicted['is_equal'].values.sum()
return correct / float(len(actual)) * 100.0
def implementedClassifier(data):
trainSet, testSet = splitDataset(data, 0.6)
testSet = pd.DataFrame(testSet)
testX = testSet.loc[:, [0, 1, 2, 3]]
actualTestClass = testSet.loc[:, [4]]
actualTestClass.columns = ['0']
separated = separate_by_class(trainSet)
X_seto = separated['Iris-setosa']
X_versi = separated['Iris-versicolor']
X_virgi = separated['Iris-virginica']
X_seto = pd.DataFrame(X_seto)
X_versi = pd.DataFrame(X_versi)
X_virgi = pd.DataFrame(X_virgi)
prior_seto = len(X_seto) / len(trainSet)
prior_versi = len(X_versi) / len(trainSet)
prior_virgi = len(X_virgi) / len(trainSet)
X1_seto = fit_distribution(X_seto[0])
X2_seto = fit_distribution(X_seto[1])
X3_seto = fit_distribution(X_seto[2])
X4_seto = fit_distribution(X_seto[3])
X1_versi = fit_distribution(X_versi[0])
X2_versi = fit_distribution(X_versi[1])
X3_versi = fit_distribution(X_versi[2])
X4_versi = fit_distribution(X_versi[3])
X1_virgi = fit_distribution(X_virgi[0])
X2_virgi = fit_distribution(X_virgi[1])
X3_virgi = fit_distribution(X_virgi[2])
X4_virgi = fit_distribution(X_virgi[3])
X_sample1, y_sample1 = (testX.loc[[16]], actualTestClass.loc[[16]])
X_sample2, y_sample2 = (testX.loc[[30]], actualTestClass.loc[[30]])
prob_seto = probability(X_sample1, prior_seto, X1_seto, X2_seto, X3_seto, X4_seto)
prob_versi = probability(X_sample1, prior_versi, X1_versi, X2_versi, X3_versi, X4_versi)
prob_virgi = probability(X_sample1, prior_virgi, X1_virgi, X2_virgi, X3_virgi, X4_virgi)
predicted = predictedClass(X_sample1, prob_seto, prob_versi, prob_virgi)
prob_seto = probability(X_sample2, prior_seto, X1_seto, X2_seto, X3_seto, X4_seto)
prob_versi = probability(X_sample2, prior_versi, X1_versi, X2_versi, X3_versi, X4_versi)
prob_virgi = probability(X_sample2, prior_virgi, X1_virgi, X2_virgi, X3_virgi, X4_virgi)
predicted = predictedClass(X_sample2, prob_seto, prob_versi, prob_virgi)
prob_seto = probability(testSet, prior_seto, X1_seto, X2_seto, X3_seto, X4_seto)
prob_versi = probability(testSet, prior_versi, X1_versi, X2_versi, X3_versi, X4_versi)
prob_virgi = probability(testSet, prior_virgi, X1_virgi, X2_virgi, X3_virgi, X4_virgi)
predicted = predictedClass(testSet, prob_seto, prob_versi, prob_virgi)
predicted = pd.DataFrame(predicted)
predicted.columns = ['0']
accuracyPercentage = accuracy(actualTestClass, predicted)
if __name__ == '__main__':
filepath = '../input/iris-dataset/iris.data.csv'
data = load_csv(filepath)
for i in range(len(data[0]) - 1):
str_column_to_float(data, i)
implementedClassifier(data) | code |
130015160/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
from keras.datasets import mnist

# Load MNIST, scale pixels to [0, 1], and add the channel axis expected by the (28, 28, 1) input.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255
x_test = x_test.reshape(-1, 28, 28, 1) / 255
i = tf.keras.Input((28, 28, 1))
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(i)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2D(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
autoencoder = tf.keras.models.Model(i, x)
autoencoder.summary()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')
r = autoencoder.fit(x=x_train, y=x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test, x_test))
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='validation_loss')
plt.xlabel('epoch')
plt.legend() | code |
130015160/cell_2 | [
"text_plain_output_1.png"
] | from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data() | code |
130015160/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
from tensorflow import keras
from keras.datasets import mnist
import matplotlib.pyplot as plt | code |
130015160/cell_7 | [
"text_plain_output_1.png"
] | import tensorflow as tf
from keras.datasets import mnist

# Load MNIST, scale pixels to [0, 1], and add the channel axis expected by the (28, 28, 1) input.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255
x_test = x_test.reshape(-1, 28, 28, 1) / 255
i = tf.keras.Input((28, 28, 1))
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(i)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2D(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
autoencoder = tf.keras.models.Model(i, x)
autoencoder.summary()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')
r = autoencoder.fit(x=x_train, y=x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test, x_test)) | code |
130015160/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
from keras.datasets import mnist

# Load MNIST, scale pixels to [0, 1], and add the channel axis expected by the (28, 28, 1) input.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255
x_test = x_test.reshape(-1, 28, 28, 1) / 255
i = tf.keras.Input((28, 28, 1))
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(i)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2D(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
autoencoder = tf.keras.models.Model(i, x)
autoencoder.summary()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')
r = autoencoder.fit(x=x_train, y=x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test, x_test))
plt.plot(r.history['accuracy'], label='accuracy')
plt.plot(r.history['val_accuracy'], label='val_accuracy')
plt.xlabel('epoch')
plt.legend() | code |
130015160/cell_5 | [
"text_plain_output_1.png"
] | import tensorflow as tf
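# Convolutional autoencoder: two conv/pool encoder stages mirrored by a transpose-conv decoder with upsampling.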
i = tf.keras.Input((28, 28, 1))
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(i)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2D(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='tanh', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
autoencoder = tf.keras.models.Model(i, x)
autoencoder.summary() | code |
88090019/cell_1 | [
"text_plain_output_1.png"
] | !pip install -U timm | code |
88090019/cell_17 | [
"text_plain_output_1.png"
] | from torch.utils.data.dataset import Dataset
import cv2
import os
import timm
import torch
conf = {'batch': 16, 'image_dir': '../input/dog-image-dsg/photo/photo', 'image_size': 224, 'tta': 1, 'num_classes': 73, 'num_workers': 2, 'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
modeldef = [{'mdl': 'convnext_base_in22ft1k', 'pth': '../input/d/sinpcw/dog-image-dsg/ex001/ex001/k0/model_best.pth'}, {'mdl': 'convnext_base_in22ft1k', 'pth': '../input/d/sinpcw/dog-image-dsg/ex001/ex001/k1/model_best.pth'}, {'mdl': 'convnext_base_in22ft1k', 'pth': '../input/d/sinpcw/dog-image-dsg/ex001/ex001/k2/model_best.pth'}, {'mdl': 'convnext_base_in22ft1k', 'pth': '../input/d/sinpcw/dog-image-dsg/ex001/ex001/k3/model_best.pth'}, {'mdl': 'convnext_base_in22ft1k', 'pth': '../input/d/sinpcw/dog-image-dsg/ex001/ex001/k4/model_best.pth'}]
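# Test-time augmentation: ops selects identity, horizontal flip, vertical flip, or both.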
def TTA(x, ops):
if ops == 0:
y = x
elif ops == 1:
y = torch.flip(x, [-1])
elif ops == 2:
y = torch.flip(x, [-2])
elif ops == 3:
y = torch.flip(x, [-1, -2])
else:
raise ValueError()
return y
class InferDataset(Dataset):
def __init__(self, image_dir, dataframe, augmentop):
self.image_dir = image_dir
self.dataframe = dataframe
self.augmentop = augmentop
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
img = cv2.imread(os.path.join(self.image_dir, self.dataframe.iat[idx, 0] + '.jpg'))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = self.augmentop(force_apply=False, image=img)['image']
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img)
return img
def GetLoader(image_dir, dataframe, augmentop, batch=1, num_workers=2):
return torch.utils.data.DataLoader(InferDataset(image_dir, dataframe, augmentop), batch_size=batch, shuffle=False, drop_last=False, num_workers=num_workers)
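# Rebuild the timm architecture and load the saved fold checkpoint for inference.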
def GetModel(name, num_classes, pth):
model = timm.create_model(model_name=name, num_classes=num_classes, in_chans=3, pretrained=False)
state = torch.load(pth, map_location='cpu')
model.load_state_dict(state, strict=True)
model.eval()
return model
def GetModels(config, mdefs):
models = []
for i, mdef in enumerate(mdefs):
mdl = GetModel(mdef['mdl'], config['num_classes'], mdef['pth']).to(conf['device'])
models.append(mdl)
return models
infer_models = GetModels(conf, modeldef) | code |
88090019/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
infer_df = pd.read_csv('../input/dog-image-dsg/test.csv')
infer_df.head() | code |
72073661/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
kf = KFold(shuffle=True, random_state=5)
cat_cols = [c for c in df_train.columns if 'cat' in c]
ordinal_encoder = OrdinalEncoder()
ordinal_encoder.fit(df_train.loc[:, cat_cols])
def transform_categorical(df, oe=ordinal_encoder):
cat_cols = [c for c in df.columns if 'cat' in c]
df[cat_cols] = oe.transform(df.loc[:, cat_cols])
return df
df_train = transform_categorical(df_train)
df_test = transform_categorical(df_test)
preds = []
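# 5-fold CV: fit one XGBRegressor per fold, report held-out RMSE, and collect per-fold test predictions.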
for fold, (train_indices, validation_indices) in enumerate(kf.split(df_train)):
xtrain = df_train.iloc[train_indices]
xvalid = df_train.iloc[validation_indices]
ytrain = xtrain.target
yvalid = xvalid.target
xtrain = xtrain.drop(columns=['target'])
xvalid = xvalid.drop(columns=['target'])
model = XGBRegressor(random_state=fold, verbosity=1)
model.fit(xtrain, ytrain)
preds_valid = model.predict(xvalid)
preds_test = model.predict(df_test)
preds.append(preds_test)
print(fold, mean_squared_error(yvalid, preds_valid, squared=False)) | code |
2003218/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/spooky-author-identification/train.csv')
test_data = pd.read_csv('../input/spooky-author-identification/test.csv')
train_data.describe() | code |
2003218/cell_3 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import pandas as pd
train_data = pd.read_csv('../input/spooky-author-identification/train.csv')
test_data = pd.read_csv('../input/spooky-author-identification/test.csv')
def preprocess_text(text, remove_list):
"""
tokens = nltk.pos_tag(nltk.word_tokenize(text))
print(tokens)
good_words = [w for w, wtype in tokens if wtype not in remove_list]
print(good_words)
"""
def clean_word(word):
word = word.lower()
if len(word) > 1 and word[0] == "'":
return word[1:]
return word
tokens = nltk.pos_tag(nltk.word_tokenize(text))
tokens = [(clean_word(word), pos) for word, pos in tokens]
return [(word, pos) for word, pos in tokens if word not in remove_list]
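# Apply the cleaner to every document, removing NLTK English stop words and punctuation.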
def preprocess_corpus(corpus):
stop_words = set(stopwords.words('english'))
stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}'])
return [preprocess_text(text, stop_words) for text in corpus]
preprocessed_train_corpus = preprocess_corpus(train_data['text'])
preprocessed_test_corpus = preprocess_corpus(test_data['text'])
print(preprocessed_train_corpus[0]) | code |
88085043/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df
df.isnull().sum()
ma_days = [7, 10, 14, 21, 50, 100]
maxi_days = [30, 365, 730]
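# Rolling-window features: moving averages of Close, plus rolling max/min over longer horizons.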
def calculate_average(df, ma_days):
for ma in ma_days:
column_name = 'MA for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).mean()
def calculate_maximum(df, ma_days):
for ma in maxi_days:
column_name = 'Maximum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).max()
def calculate_minimum(df, ma_days):
for ma in maxi_days:
column_name = 'Minimum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).min()
calculate_average(df, ma_days)
calculate_maximum(df, maxi_days)
calculate_minimum(df, maxi_days)
df['std for 7 days'] = pd.DataFrame.rolling(df['Close'], 7).std()
df['Diff High Low'] = df['High'] - df['Low']
df['Diff Open Close'] = df['Open'] - df['Close']
df['Daily Return'] = df['Close'].pct_change() * 100
df.columns
df.isnull().sum()
from sklearn.model_selection import train_test_split
y = df['Close']
df['Close previous'] = df['Close']
df = df.drop(['Close'], axis=1)
df = df.drop(['Adjusted Close'], axis=1)
df = df.shift(periods=1)
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.2, shuffle=False)
print(x_test.isnull().sum())
x_train.isnull().sum()
x_train.fillna(x_train.mean(), inplace=True)
x_train.isnull().sum()
df
df.fillna(x_train.mean(), inplace=True) | code |
88085043/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df
df.info()
df.describe()
df.isnull().sum() | code |
88085043/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df | code |
88085043/cell_7 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df
df.isnull().sum()
ma_days = [7, 10, 14, 21, 50, 100]
maxi_days = [30, 365, 730]
def calculate_average(df, ma_days):
for ma in ma_days:
column_name = 'MA for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).mean()
def calculate_maximum(df, ma_days):
for ma in maxi_days:
column_name = 'Maximum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).max()
def calculate_minimum(df, ma_days):
for ma in maxi_days:
column_name = 'Minimum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).min()
calculate_average(df, ma_days)
calculate_maximum(df, maxi_days)
calculate_minimum(df, maxi_days)
df['std for 7 days'] = pd.DataFrame.rolling(df['Close'], 7).std()
df['Diff High Low'] = df['High'] - df['Low']
df['Diff Open Close'] = df['Open'] - df['Close']
df['Daily Return'] = df['Close'].pct_change() * 100
print('number of features {}'.format(len(df.columns)))
df.columns
df.isnull().sum() | code |
88085043/cell_14 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from plotly.offline import init_notebook_mode
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df
df.isnull().sum()
ma_days = [7, 10, 14, 21, 50, 100]
maxi_days = [30, 365, 730]
def calculate_average(df, ma_days):
for ma in ma_days:
column_name = 'MA for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).mean()
def calculate_maximum(df, ma_days):
for ma in maxi_days:
column_name = 'Maximum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).max()
def calculate_minimum(df, ma_days):
for ma in maxi_days:
column_name = 'Minimum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).min()
calculate_average(df, ma_days)
calculate_maximum(df, maxi_days)
calculate_minimum(df, maxi_days)
df['std for 7 days'] = pd.DataFrame.rolling(df['Close'], 7).std()
df['Diff High Low'] = df['High'] - df['Low']
df['Diff Open Close'] = df['Open'] - df['Close']
df['Daily Return'] = df['Close'].pct_change() * 100
df.columns
df.isnull().sum()
from sklearn.model_selection import train_test_split
y = df['Close']
df['Close previous'] = df['Close']
df = df.drop(['Close'], axis=1)
df = df.drop(['Adjusted Close'], axis=1)
df = df.shift(periods=1)
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.2, shuffle=False)
x_train.isnull().sum()
x_train.fillna(x_train.mean(), inplace=True)
x_train.isnull().sum()
df
df.fillna(x_train.mean(), inplace=True)
from sklearn.feature_selection import SelectKBest, f_classif
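# Keep the 10 features with the highest ANOVA F-scores (the k argument is currently unused).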
def select_k_features(k=8, x_train=x_train, y_train=y_train):
bestfeatures = SelectKBest(score_func=f_classif, k=10)
fit = bestfeatures.fit(x_train, y_train)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(x_train.columns)
featureScores = pd.concat([dfcolumns, dfscores], axis=1)
featureScores.columns = ['Specs', 'Score']
best_10 = list(featureScores.nlargest(10, 'Score')['Specs'])
for col in x_train:
if col not in best_10:
x_train.drop([col], axis=1, inplace=True)
return x_train
x_train = select_k_features()
x_test = x_test[x_train.columns]
x_train.columns
import sklearn.metrics
import math
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = (np.array(y_true), np.array(y_pred))
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
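# Fit a model, predict on the chronological test split, and compute MSE/RMSE/MBE diagnostics.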
def train_test_and_measure(model_name, model, x_train=x_train, x_test=x_test, y_train=y_train, y_test=y_test):
model = model.fit(x_train, y_train)
prediction = model.predict(x_test)
mse = sklearn.metrics.mean_squared_error(y_test, prediction)
rmse = math.sqrt(mse)
MBE = np.mean(prediction - y_test)
plot_df = pd.DataFrame(y_test)
plot_df['predictions'] = prediction
return prediction
import time
from datetime import datetime
import plotly_express as px
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
regression_model = LinearRegression()
adaBoostRegressor_model = AdaBoostRegressor(random_state=0, n_estimators=100)
randomForestRegressor_model = RandomForestRegressor(max_depth=2, random_state=0)
mlp_model = MLPRegressor(random_state=1, max_iter=3000)  # avoid shadowing the imported MLPRegressor class
train_test_and_measure('regression_model', regression_model)
train_test_and_measure('adaBoostRegressor_model', adaBoostRegressor_model)
train_test_and_measure('randomForestRegressor_model', randomForestRegressor_model)
pred = train_test_and_measure('MLPRegressor', mlp_model)
print(mlp_model.score(x_test, y_test))
88085043/cell_10 | [
"text_html_output_1.png"
] | from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import pandas as pd
def load_stock(name):
df = pd.read_csv('../input/stock-market-data/stock_market_data/nasdaq/csv/{}.csv'.format(name))
df.set_index('Date', inplace=True)
return df
names = ['AAL', 'AAPL']
stocks = [load_stock(n) for n in names]
df = stocks[0][-1500:]
df
df.isnull().sum()
ma_days = [7, 10, 14, 21, 50, 100]
maxi_days = [30, 365, 730]
def calculate_average(df, ma_days):
for ma in ma_days:
column_name = 'MA for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).mean()
def calculate_maximum(df, ma_days):
for ma in maxi_days:
column_name = 'Maximum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).max()
def calculate_minimum(df, ma_days):
for ma in maxi_days:
column_name = 'Minimum for %s days' % str(ma)
df[column_name] = pd.DataFrame.rolling(df['Close'], ma).min()
calculate_average(df, ma_days)
calculate_maximum(df, maxi_days)
calculate_minimum(df, maxi_days)
df['std for 7 days'] = pd.DataFrame.rolling(df['Close'], 7).std()
df['Diff High Low'] = df['High'] - df['Low']
df['Diff Open Close'] = df['Open'] - df['Close']
df['Daily Return'] = df['Close'].pct_change() * 100
df.columns
df.isnull().sum()
from sklearn.model_selection import train_test_split
y = df['Close']
df['Close previous'] = df['Close']
df = df.drop(['Close'], axis=1)
df = df.drop(['Adjusted Close'], axis=1)
df = df.shift(periods=1)
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.2, shuffle=False)
x_train.isnull().sum()
x_train.fillna(x_train.mean(), inplace=True)
x_train.isnull().sum()
df
df.fillna(x_train.mean(), inplace=True)
from sklearn.feature_selection import SelectKBest, f_classif
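# Keep the 10 features with the highest ANOVA F-scores (the k argument is currently unused).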
def select_k_features(k=8, x_train=x_train, y_train=y_train):
bestfeatures = SelectKBest(score_func=f_classif, k=10)
fit = bestfeatures.fit(x_train, y_train)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(x_train.columns)
featureScores = pd.concat([dfcolumns, dfscores], axis=1)
featureScores.columns = ['Specs', 'Score']
best_10 = list(featureScores.nlargest(10, 'Score')['Specs'])
for col in x_train:
if col not in best_10:
x_train.drop([col], axis=1, inplace=True)
return x_train
x_train = select_k_features()
x_test = x_test[x_train.columns]
x_train.columns | code |
106198232/cell_21 | [
"image_output_1.png"
] | import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
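# OpenCV denoising baselines: Gaussian, median, and bilateral filters applied to one noisy simulation.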
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3,3), 0, borderType=cv2.BORDER_CONSTANT)
#plt.imshow(img_gaussian_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_median_opencv = cv2.medianBlur(sim[0][1].reshape(40,40), 1)
#plt.imshow(img_median_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_median_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img = np.broadcast_to(sim[0][1], (40, 40, 3))  # cvtColor rejects single-channel input for BGR2RGB
img = cv2.cvtColor(np.array(img, dtype='ubyte'), cv2.COLOR_BGR2RGB)
image_bilateral_opencv = cv2.bilateralFilter(img, 5, 40, 100, borderType=cv2.BORDER_CONSTANT)
plt.imshow(image_bilateral_opencv) | code |
106198232/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
print(sim[0][1].shape) | code |
106198232/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import random
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
plt.subplots(10, 5, figsize=(20, 30))
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
plt.subplot(10, 10, 1 + i)
plt.imshow(img)
plt.show() | code |
106198232/cell_30 | [
"image_output_2.png",
"image_output_1.png"
] | from skimage.filters import gaussian
from skimage.filters import median
from skimage.morphology import disk
from skimage.restoration import denoise_bilateral
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3,3), 0, borderType=cv2.BORDER_CONSTANT)
#plt.imshow(img_gaussian_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_median_opencv = cv2.medianBlur(sim[0][1].reshape(40,40), 1)
#plt.imshow(img_median_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_median_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img = np.broadcast_to(sim[0][1], (40, 40, 3))  # cvtColor rejects single-channel input for BGR2RGB
img = cv2.cvtColor(np.array(img, dtype='ubyte'), cv2.COLOR_BGR2RGB)
image_bilateral_opencv = cv2.bilateralFilter(img, 5, 40, 100, borderType=cv2.BORDER_CONSTANT)
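# scikit-image counterparts: gaussian, median (disk footprint), and bilateral denoising.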
img_gaussian_scikit = gaussian(sim[0][1], sigma=1, mode='constant', cval=0)
plt.imshow(img_gaussian_scikit)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_gaussian_scikit)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img = sim[0][1].reshape(40,40)
img_median_scikit = median(img, disk(1), mode='constant', cval=0)
#plt.imshow(img_median_scikit)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_median_scikit)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_bilateral_scikit = denoise_bilateral(sim[0][1].reshape(40, 40), sigma_color=0.05, sigma_spatial=15, multichannel=False)
fig, axes = plt.subplots(ncols=2, figsize=(15, 5))
axes[0].imshow(sim[0][1], label='label')
axes[1].imshow(img_bilateral_scikit)
axes[0].set_title('noised')
axes[1].set_title('denoised')
plt.show() | code |
106198232/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import os
os.listdir('../input') | code |
106198232/cell_19 | [
"text_plain_output_1.png"
] | import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3,3), 0, borderType=cv2.BORDER_CONSTANT)
#plt.imshow(img_gaussian_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_median_opencv = cv2.medianBlur(sim[0][1].reshape(40, 40), 1)
fig, axes = plt.subplots(ncols=2, figsize=(15, 5))
axes[0].imshow(sim[0][1], label='label')
axes[1].imshow(img_median_opencv)
axes[0].set_title('noised')
axes[1].set_title('denoised')
plt.show() | code |
106198232/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import random
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
plt.subplots(10, 5, figsize=(20, 30))
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
plt.subplot(10, 10, 1 + i)
plt.imshow(img)
plt.show() | code |
106198232/cell_15 | [
"image_output_1.png"
] | import numpy as np
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
img3 = sim[0][1].reshape(40, 40)
print(img3.shape) | code |
106198232/cell_16 | [
"text_plain_output_1.png"
] | import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
print(img.shape)
plt.imshow(img) | code |
106198232/cell_17 | [
"text_plain_output_1.png"
] | import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3, 3), 0, borderType=cv2.BORDER_CONSTANT)
fig, axes = plt.subplots(ncols=2, figsize=(15, 5))
axes[0].imshow(sim[0][1], label='label')
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title('noised')
axes[1].set_title('denoised')
plt.show() | code |
106198232/cell_24 | [
"image_output_1.png"
] | from skimage.filters import gaussian
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3,3), 0, borderType=cv2.BORDER_CONSTANT)
#plt.imshow(img_gaussian_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_median_opencv = cv2.medianBlur(sim[0][1].reshape(40,40), 1)
#plt.imshow(img_median_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1], label="label")
axes[1].imshow(img_median_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img = np.broadcast_to(sim[0][1], (40, 40, 3))  # cvtColor rejects single-channel input for BGR2RGB
img = cv2.cvtColor(np.array(img, dtype='ubyte'), cv2.COLOR_BGR2RGB)
image_bilateral_opencv = cv2.bilateralFilter(img, 5, 40, 100, borderType=cv2.BORDER_CONSTANT)
img_gaussian_scikit = gaussian(sim[0][1], sigma=1, mode='constant', cval=0)
plt.imshow(img_gaussian_scikit)
fig, axes = plt.subplots(ncols=2, figsize=(15, 5))
axes[0].imshow(sim[0][1], label='label')
axes[1].imshow(img_gaussian_scikit)
axes[0].set_title('noised')
axes[1].set_title('denoised')
plt.show() | code |
106198232/cell_14 | [
"image_output_1.png"
] | import h5py
import numpy as np
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
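# expand_dims adds another trailing axis: sim[0][1] is (40, 40, 1), so img2 becomes (40, 40, 1, 1)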
img2 = np.expand_dims(sim[0][1], axis=2)
print(img2.shape) | code |
106198232/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from skimage.filters import gaussian
from skimage.filters import median
from skimage.morphology import disk
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np
import random
event = h5py.File('../input/insar-dataset/insar_data_event.hdf5', 'r')
disp = np.zeros(shape=(16000, 40, 40))
sim = np.zeros(shape=(16000, 9, 40, 40, 1))
topo = np.zeros(shape=(16000, 1, 40, 40, 1))
for i in range(len(event.keys())):
for j in range(100):
disp[i * 100 + j] = np.array(event['event{}'.format(i)]['displacement'])
sim[i * 100 + j] = np.array(event['event{}'.format(i)]['simulation'])
topo[i * 100 + j] = np.array(event['event{}'.format(i)]['topography'])
for i in range(100):
ind = random.randint(0, 15999)
img = disp[ind]
for i in range(100):
ind = random.randint(0, 15999)
img = sim[ind][1]
img2 = np.expand_dims(sim[0][1], axis=2)
img = np.broadcast_to(sim[0][1], (40, 40, 3)).copy()
img_gaussian_opencv = cv2.GaussianBlur(sim[0][1], (3,3), 0, borderType=cv2.BORDER_CONSTANT)
#plt.imshow(img_gaussian_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1])
axes[1].imshow(img_gaussian_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img_median_opencv = cv2.medianBlur(sim[0][1].reshape(40, 40).astype(np.float32), 3)
#plt.imshow(img_median_opencv)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1])
axes[1].imshow(img_median_opencv)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
img = cv2.cvtColor(np.array(sim[0][1], dtype='ubyte'), cv2.COLOR_GRAY2RGB)
image_bilateral_opencv = cv2.bilateralFilter(img, 5, 40, 100, borderType=cv2.BORDER_CONSTANT)
img_gaussian_scikit = gaussian(sim[0][1], sigma=1, mode='constant', cval=0)
#plt.imshow(img_gaussian_scikit)
fig, axes = plt.subplots(ncols=2, figsize=(15,5))
axes[0].imshow(sim[0][1])
axes[1].imshow(img_gaussian_scikit)
axes[0].set_title("noised")
axes[1].set_title("denoised")
plt.show()
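# scikit-image median filter over a radius-1 disk footprint (a 3x3 cross)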
img = sim[0][1].reshape(40, 40)
img_median_scikit = median(img, disk(1), mode='constant', cval=0)
fig, axes = plt.subplots(ncols=2, figsize=(15, 5))
axes[0].imshow(sim[0][1])
axes[1].imshow(img_median_scikit)
axes[0].set_title('noised')
axes[1].set_title('denoised')
plt.show() | code |
122256098/cell_21 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
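# Random forest on the two encoded features; 30% of the rows were held out above for validation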
rf = RandomForestClassifier()
rf.fit(tr_x, tr_y.values.ravel())
Accuracy_RandomForest = rf.score(cv_x, cv_y)
test = pd.read_csv('test.csv')
test_x = test[['Pclass', 'Sex']].copy()
test_x['Sex'] = test_x['Sex'].replace({'male': 1, 'female': 0})
prd = rf.predict(test_x)
prd | code |
122256098/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
rf = RandomForestClassifier()
rf.fit(tr_x, tr_y.values.ravel())
Accuracy_RandomForest = rf.score(cv_x, cv_y)
print('Accuracy = {}%'.format(Accuracy_RandomForest * 100)) | code |
122256098/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
train_x.head() | code |
122256098/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
op = test[['PassengerId']]
op.to_csv('Submission.csv', index=False) | code |
122256098/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.head() | code |
122256098/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
rf = RandomForestClassifier()
rf.fit(tr_x, tr_y.values.ravel())
Accuracy_RandomForest = rf.score(cv_x, cv_y)
test = pd.read_csv('test.csv')
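# Apply the same two-feature selection and encoding to the test set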
test_x = test[['Pclass', 'Sex']].copy()
test_x['Sex'] = test_x['Sex'].replace({'male': 1, 'female': 0})
prd = rf.predict(test_x)
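# Build the submission frame: PassengerId plus the predicted Survived column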
op = test[['PassengerId']].copy()
op['Survived'] = prd | code |
122256098/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum() | code |
122256098/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test_x = test[['Pclass', 'Sex']].copy()
test_x['Sex'] = test_x['Sex'].replace({'male': 1, 'female': 0})
test_x.head() | code |
122256098/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']]
train_x.head() | code |
122256098/cell_18 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test_x = test[['Pclass', 'Sex']].copy()
test_x['Sex'] = test_x['Sex'].replace({'male': 1, 'female': 0})
test_x.head() | code |
122256098/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_y = train[['Survived']]
train_y.head() | code |
122256098/cell_15 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
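# Logistic regression baseline on the same two features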
lgr = LogisticRegression()
lgr.fit(tr_x, tr_y.values.ravel())
Accuracy_LogisticRegression = lgr.score(cv_x, cv_y)
print('Accuracy = {}%'.format(Accuracy_LogisticRegression * 100)) | code |
122256098/cell_16 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test.head() | code |
122256098/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test_x = test[['Pclass', 'Sex']]
test_x.head() | code |
122256098/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
op = test[['PassengerId']]
op.head() | code |
122256098/cell_14 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
lgr = LogisticRegression()
lgr.fit(tr_x, tr_y.values.ravel()) | code
122256098/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
print(tr_x.head())
print(tr_y.head()) | code |
122256098/cell_12 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
train = pd.read_csv('train.csv')
train.isnull().sum()
train_x = train[['Pclass', 'Sex']].copy()
train_y = train[['Survived']]
train_x['Sex'] = train_x['Sex'].replace({'male': 1, 'female': 0})
tr_x, cv_x, tr_y, cv_y = train_test_split(train_x, train_y, test_size=0.3)
rf = RandomForestClassifier()
rf.fit(tr_x, tr_y.values.ravel()) | code
122256098/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.describe() | code |
122248046/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0]
df = df.drop('Destination', axis=1)
popular_cities = df['City'].value_counts()[:10]
popular_country = df['Country'].value_counts()[:10]
transport_counts = df['Transportation type'].value_counts()[:5]
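# Count trips per traveler age group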
sns.countplot(data=df, x='Traveler age', palette='bright')
plt.title('Number of trips by age group')
plt.xticks(rotation=90)
plt.show() | code |
122248046/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum() | code |
122248046/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0]
df = df.drop('Destination', axis=1)
popular_cities = df['City'].value_counts()[:10]
popular_country = df['Country'].value_counts()[:10]
transport_counts = df['Transportation type'].value_counts()[:5]
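# Pie chart of transport-type shares; autopct renders one-decimal percentages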
transport_counts.plot.pie(labels=transport_counts.index.tolist(), autopct='%1.1f%%', startangle=90)
plt.axis('equal')
plt.title('Transportation Types')
plt.show() | code |
122248046/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum() | code |
122248046/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0]
df = df.drop('Destination', axis=1)
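# value_counts().idxmax() returns the most frequent duration value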
durations_count = df['Duration (days)'].value_counts().idxmax()
print('Most common travel duration is', durations_count, 'days.') | code
122248046/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import plotly.express as px
from plotly.offline import iplot
import seaborn as sns
import matplotlib.pyplot as plt
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122248046/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0] | code |
122248046/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0]
df = df.drop('Destination', axis=1)
popular_cities = df['City'].value_counts()[:10]
popular_country = df['Country'].value_counts()[:10]
popular_country.plot.bar()
plt.title('10 most popular travel countries')
plt.xlabel('Countries')
plt.ylabel('Trips') | code |
122248046/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/traveler-trip-data/Travel details dataset.csv')
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.shape[0]
df.info() | code |