path | screenshot_names | code | cell_type |
---|---|---|---|
2007984/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
print(train.shape)
train.head() | code |
2007984/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')  # column 0 is the label; the remaining columns are pixel values
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
x_train.reshape | code |
2007984/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2007984/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px | code |
2007984/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense,Dropout, Activation,Lambda,Flatten
from keras.models import Sequential
from keras.optimizers import Adam , RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
x_train.reshape
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
from keras.preprocessing import image
gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
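# Note: assigning a plain float to model.optimizer.lr typically does not affect the already-compiled training function; K.set_value(model.optimizer.lr, 0.01) is the usual way to change the learning rate in this Keras version.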
model.optimizer.lr = 0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, Y_train, batch_size=64)
history = model.fit_generator(batches, batches.n, nb_epoch=1)
history.history | code |
2007984/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
x_train.reshape
from keras.preprocessing import image
gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64) | code |
2007984/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28) | code |
2007984/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.utils.np_utils import to_categorical
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
y_train.shape
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10) | code |
2007984/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.utils.np_utils import to_categorical
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
y_train.shape
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape | code |
2007984/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(test.shape)
test.head() | code |
2007984/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
index = 678
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
print(y_train[index])
plt.plot(y_train[index])
plt.xticks(range(10))
plt.show() | code |
2007984/cell_35 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense,Dropout, Activation,Lambda,Flatten
from keras.models import Sequential
from keras.optimizers import Adam , RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
x_train.reshape
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
from keras.preprocessing import image
gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
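# Note: assigning a plain float to model.optimizer.lr typically does not affect the already-compiled training function; K.set_value(model.optimizer.lr, 0.01) is the usual way to change the learning rate in this Keras version.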
model.optimizer.lr = 0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, Y_train, batch_size=64)
history = model.fit_generator(batches, batches.n, nb_epoch=1)
preds = model.predict_classes(x_test, verbose=0)
subs = pd.DataFrame({'ImageId': list(range(1, len(preds) + 1)), 'Label': preds})
subs.to_csv('sub1.csv', index=False, header=True) | code |
2007984/cell_31 | [
"text_plain_output_1.png"
] | from keras.layers import Dense,Dropout, Activation,Lambda,Flatten
from keras.models import Sequential
from keras.optimizers import Adam , RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_train.shape
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
x_train.reshape
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
from keras.preprocessing import image
gen = image.ImageDataGenerator()
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=34)
batches = gen.flow(X_train, Y_train, batch_size=64)
val_batches = gen.flow(X_val, Y_val, batch_size=64)
cache = model.fit_generator(batches, batches.n, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.n)
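# Note: assigning a plain float to model.optimizer.lr typically does not affect the already-compiled training function; K.set_value(model.optimizer.lr, 0.01) is the usual way to change the learning rate in this Keras version.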
model.optimizer.lr = 0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, Y_train, batch_size=64)
history = model.fit_generator(batches, batches.n, nb_epoch=1) | code |
2007984/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense,Dropout, Activation,Lambda,Flatten
from keras.models import Sequential
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
(x_train.shape, x_test.shape)
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
def standardize(x):
return (x - mean_px) / std_px
model = Sequential()
model.add(Lambda(standardize, input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax')) | code |
2007984/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
y_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
index = 678
plt.imshow(x_train[index])
print('Number is', y_train[index]) | code |
2007984/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28)
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1) | code |
2007984/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
x_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
x_test = test.values.astype('float32')
x_train.shape | code |
88091391/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
df['CarName'].unique() | code |
88091391/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
def check_df(dataframe, head=5):
pass
check_df(df) | code |
88091391/cell_4 | [
"image_output_1.png"
] | import os
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88091391/cell_30 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
df_v = pd.DataFrame(df['fueltype'].value_counts())
plot = df_v.plot.pie(y='fueltype', figsize=(5, 5)) | code |
88091391/cell_33 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
ax = sns.pairplot(df[num_col])
f = plt.figure(figsize=(12, 5))
ax = f.add_subplot(121)
sns.distplot(df[df.fueltype == 'gas']['price'], color='b', ax=ax)
ax.set_title('Distribution of price of gas vehicles')
ax = f.add_subplot(122)
sns.distplot(df[df.fueltype == 'diesel']['price'], color='r', ax=ax)
ax.set_title('Distribution of price of diesel vehicles');
sns.boxplot(x='fueltype', y='price', data=df, palette='Pastel1') | code |
88091391/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
num_col | code |
88091391/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T | code |
88091391/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
ax = sns.pairplot(df[num_col]) | code |
88091391/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
cat_col | code |
88091391/cell_32 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
ax = sns.pairplot(df[num_col])
f = plt.figure(figsize=(12, 5))
ax = f.add_subplot(121)
sns.distplot(df[df.fueltype == 'gas']['price'], color='b', ax=ax)
ax.set_title('Distribution of price of gas vehicles')
ax = f.add_subplot(122)
sns.distplot(df[df.fueltype == 'diesel']['price'], color='r', ax=ax)
ax.set_title('Distribution of price of diesel vehicles') | code |
88091391/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
ax = sns.pairplot(df[num_col])
plt.figure(figsize=(20, 15))
plt.subplot(3, 3, 1)
sns.boxplot(x='doornumber', y='price', data=df)
plt.subplot(3, 3, 2)
sns.boxplot(x='fueltype', y='price', data=df)
plt.subplot(3, 3, 3)
sns.boxplot(x='aspiration', y='price', data=df)
plt.subplot(3, 3, 4)
sns.boxplot(x='carbody', y='price', data=df)
plt.subplot(3, 3, 5)
sns.boxplot(x='enginetype', y='price', data=df)
plt.subplot(3, 3, 6)
sns.boxplot(x='fuelsystem', y='price', data=df)
plt.show() | code |
88091391/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
df['CarName'].unique() | code |
88091391/cell_35 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
ax = sns.pairplot(df[num_col])
df_v = pd.DataFrame(df['fueltype'].value_counts())
plot = df_v.plot.pie(y='fueltype', figsize=(5, 5))
f = plt.figure(figsize=(12, 5))
ax = f.add_subplot(121)
sns.distplot(df[df.fueltype == 'gas']['price'], color='b', ax=ax)
ax.set_title('Distribution of price of gas vehicles')
ax = f.add_subplot(122)
sns.distplot(df[df.fueltype == 'diesel']['price'], color='r', ax=ax)
ax.set_title('Distribution of price of diesel vehicles');
df_v = pd.DataFrame(df['aspiration'].value_counts())
plot = df_v.plot.pie(y='aspiration', figsize=(5, 5)) | code |
88091391/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80)
plt.figure(figsize=(8, 8))
plt.title('Car Price Distribution Plot')
sns.distplot(df['price']) | code |
88091391/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
df.shape
df = df.drop(['car_ID'], axis=1)
cat_col = df.select_dtypes(include=['object']).columns
num_col = df.select_dtypes(exclude=['object']).columns
df_v = pd.DataFrame(df['CarName'].value_counts()).reset_index().rename(columns={'index': 'car_name', 'CarName': 'count'})
plot = sns.barplot(y='car_name', x='count', data=df_v)
plot = plt.setp(plot.get_xticklabels(), rotation=80) | code |
88091391/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.describe().T
outliers = ['price']
plt.rcParams['figure.figsize'] = [8, 8]
sns.boxplot(data=df[outliers], orient='v', palette='Set1', whis=1.5, saturation=1, width=0.7)
plt.title('Outliers Variable Distribution', fontsize=14, fontweight='bold')
plt.ylabel('Price Range', fontweight='bold')
plt.xlabel('Continuous Variable', fontweight='bold')
df.shape | code |
88091391/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.head() | code |
18115740/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from keras.optimizers import *
import keras
from keras.layers import *
from keras.models import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import * | code |
18115740/cell_8 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train_df['Test'] = False
test_df = pd.read_csv('../input/test.csv')
test_df['Test'] = True
df = pd.concat([train_df, test_df], sort=False)
corr = abs(train_df.corr())
price_corr = corr['SalePrice'].sort_values()
columns = list(price_corr[price_corr > 0.4].index)
columns.append('Test')
columns.append('Id')
prepared_data = df[columns].copy()
id_col = df['Id']
uniq = prepared_data.apply(lambda x: x.nunique())
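# Columns with 3 to 10 distinct values are treated as categorical and one-hot encoded below; the remaining columns are kept as continuous.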
idxs = np.array((uniq <= 10) & (uniq > 2))
dummies_columns = prepared_data.iloc[:, idxs].columns
cont_cols = set(prepared_data.columns) - set(dummies_columns)
have_garage_cards = np.where(prepared_data['GarageCars'] >= 1, 1, 0)
have_full_bath = np.where(prepared_data['FullBath'] >= 1, 1, 0)
is_new = np.where(prepared_data['YearBuilt'] > 2005, 1, 0)
have_firplaces = np.where(prepared_data['Fireplaces'] >= 1, 1, 0)
prepared_data = pd.get_dummies(prepared_data, columns=dummies_columns)
prepared_data.fillna(0, inplace=True)
price_multy = prepared_data['SalePrice'].max()
scaler = MinMaxScaler()
prepared_data.loc[:, cont_cols] /= prepared_data.loc[:, cont_cols].max()
prepared_data[['Id', 'Test']] = df[['Id', 'Test']]
prepared_data['HaveCar'] = have_garage_cards
prepared_data['HaveFullBath'] = have_full_bath
prepared_data['IsNew'] = is_new
prepared_data['HaveFirplaces'] = have_firplaces
train_data = prepared_data.loc[prepared_data['Test'] != True]
train_y = train_data['SalePrice']
train_x = train_data.drop(columns=['SalePrice', 'Id'])
X_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.2, random_state=42)
from sklearn.linear_model import *
from sklearn.metrics import *
results = []
models = [RidgeCV(), LinearRegression(), Ridge(), Lasso(alpha=0.1), BayesianRidge()]
fit_models = []
for regr in models:
regr.fit(X_train, y_train)
pred = regr.predict(X_test)
mse = mean_squared_error(pred, y_test)
results.append(mse)
fit_models.append(regr)
best_idx = np.argmin(results)
regr = fit_models[best_idx]
result_df = prepared_data.loc[prepared_data['Test'] == True]
id_col = result_df['Id']
result_df = result_df.drop(columns=['SalePrice', 'Id'])
predictions = regr.predict(result_df)
res_df = pd.DataFrame(predictions * price_multy, columns=['SalePrice'])
res_df['Id'] = id_col
res_df.to_csv('sub.csv', index=None, header=True)
res_df.head() | code |
18115740/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train_df['Test'] = False
test_df = pd.read_csv('../input/test.csv')
test_df['Test'] = True
df = pd.concat([train_df, test_df], sort=False)
corr = abs(train_df.corr())
price_corr = corr['SalePrice'].sort_values()
columns = list(price_corr[price_corr > 0.4].index)
columns.append('Test')
columns.append('Id')
df[columns].head() | code |
2026584/cell_4 | [
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
proc_data = data[data.color == 'BLACK'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.bar(y_pos, corrCount)
plt.xticks(y_pos, dogSpecies)
plt.show() | code |
2026584/cell_6 | [
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
proc_data = data[data.color == 'BLACK'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BROWN'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.bar(y_pos, corrCount)
plt.xticks(y_pos, dogSpecies)
plt.show() | code |
2026584/cell_2 | [
"image_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
print(colorData[0:5]) | code |
2026584/cell_8 | [
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
proc_data = data[data.color == 'BLACK'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BROWN'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'WHITE'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.bar(y_pos, corrCount)
plt.xticks(y_pos, dogSpecies)
plt.show() | code |
2026584/cell_10 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
proc_data = data[data.color == 'BLACK'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BROWN'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'WHITE'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BLK WHT'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.bar(y_pos, corrCount)
plt.xticks(y_pos, dogSpecies)
plt.show() | code |
2026584/cell_12 | [
"image_output_1.png"
] | from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
data = pd.read_csv('../input/Health_AnimalBites.csv')
colorData = data.color.value_counts()
proc_data = data[data.color == 'BLACK'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BROWN'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'WHITE'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'BLK WHT'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.xticks(y_pos, dogSpecies)
proc_data = data[data.color == 'TAN'].BreedIDDesc.value_counts()
dogSpecies = list(proc_data.keys())
corrCount = list(proc_data[proc_data.keys()])
dogSpecies = dogSpecies[0:5]
corrCount = corrCount[0:5]
y_pos = np.arange(len(dogSpecies))
plt.bar(y_pos, corrCount)
plt.xticks(y_pos, dogSpecies)
plt.show() | code |
122260010/cell_13 | [
"text_plain_output_1.png"
] | import numpy
import numpy
import numpy
sampleArray = numpy.array([[3, 8, 9, 11], [15, 18, 21, 24], [27, 29, 33, 34], [39, 42, 45, 48], [51, 52, 57, 53]])
import numpy
sampleArray = numpy.array([[3, 8, 9, 11], [15, 18, 21, 24], [27, 29, 33, 34], [39, 42, 45, 48], [51, 52, 57, 53]])
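# Select every other row (0, 2, 4) and columns 1 and 3 of the 5x4 array.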
sampleArray[0:5:2, 1:4:2] | code |
122260010/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr
r, c = np.shape(arr)
r
arr = np.ones((10, 10))
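# Zero the cells whose row and column indices are both even (0, 2, 4, 6, 8); all other cells stay 1.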
arr[0:-1:2, 0:-1:2] = 0
arr | code |
122260010/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr
r, c = np.shape(arr)
r | code |
122260010/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr
r, c = np.shape(arr)
r
for i in range(r):
print(np.unique(arr[i])) | code |
122260010/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr
r, c = np.shape(arr)
r
arr = np.ones((10, 10))
arr[0:-1:2, 0:-1:2] = 0
arr
# Input: np.array([1, 2, 9, 1, 3, 7, 1, 2, 10])
arr2 = np.array([1, 2, 9, 1, 3, 7, 1, 2, 10])
# print elements that are strictly greater than both neighbours (local peaks)
for i in range(1, len(arr2) - 1):
    if arr2[i] > arr2[i - 1] and arr2[i] > arr2[i + 1]:
        print(arr2[i]) | code |
122260010/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr | code |
122260010/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
arr
r, c = np.shape(arr)
r
for i in range(r):
print(np.unique(arr[i])) | code |
73079382/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
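# Collect the unique item and shop ids in the test set and the training sales so their overlap can be compared.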
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
df_test_shop = set(df_test.shop_id.unique())
df_train_shop = set(df_sale_train.shop_id.unique())
print(df_test.head(1))
print(df_sale_train.head(1))
print(df_submission.head(1)) | code |
73079382/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
print(len(df_test_item))
print(len(df_train_item))
print(df_sale_train.columns) | code |
73079382/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib as plot
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73079382/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
print('items En ' + str(len(df_item_en)))
print('submission En ' + str(len(df_submission_en)))
print('items category En ' + str(len(df_item_cat_en)))
print('sales train En ' + str(len(df_sale_train_en)))
print('shops En ' + str(len(df_shop_en)))
print('tests En ' + str(len(df_test_en)))
print('----------------------------------------')
print('items ' + str(len(df_item)))
print('submission ' + str(len(df_submission)))
print('items category ' + str(len(df_item_cat)))
print('sales train ' + str(len(df_sale_train)))
print('shops ' + str(len(df_shop)))
print('tests ' + str(len(df_test))) | code |
73079382/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
print(df_test.groupby('item_id').count())
print(df_sale_train.groupby('item_id').count()) | code |
73079382/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
df_test_shop = set(df_test.shop_id.unique())
df_train_shop = set(df_sale_train.shop_id.unique())
df_sale_train['date'] = pd.to_datetime(df_sale_train['date'], format='%d.%m.%Y')  # dates in sales_train.csv are day-first (dd.mm.yyyy)
print(df_sale_train.date.head(10))
print(df_sale_train.date.value_counts().head(10))
train_dates = df_sale_train.date.value_counts()
train_dates = train_dates.sort_index()
print(train_dates.head(10)) | code |
73079382/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
df_test_shop = set(df_test.shop_id.unique())
df_train_shop = set(df_sale_train.shop_id.unique())
df_sale_train['date'] = pd.to_datetime(df_sale_train['date'], format='%d.%m.%Y')  # dates in sales_train.csv are day-first (dd.mm.yyyy)
train_dates = df_sale_train.date.value_counts()
train_dates = train_dates.sort_index()
train_dates.plot(kind='line')
plt.show() | code |
73079382/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
df_test_shop = set(df_test.shop_id.unique())
df_train_shop = set(df_sale_train.shop_id.unique())
print(len(df_test_shop))
print(len(df_train_shop)) | code |
73079382/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_test_item = set(df_test.item_id.unique())
df_train_item = set(df_sale_train.item_id.unique())
df_test_shop = set(df_test.shop_id.unique())
df_train_shop = set(df_sale_train.shop_id.unique())
df_item.head(1)
print(df_test.columns)
print(df_sale_train.columns)
print(df_submission.columns)
print(df_item.columns)
print(df_item_cat.columns)
print(df_shop.columns) | code |
73079382/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_item_en = pd.read_csv('/kaggle/input/english-converted-datasets/items.csv')
df_submission_en = pd.read_csv('/kaggle/input/english-converted-datasets/sample_submission.csv')
df_item_cat_en = pd.read_csv('/kaggle/input/english-converted-datasets/item_categories.csv')
df_sale_train_en = pd.read_csv('/kaggle/input/english-converted-datasets/sales_train.csv')
df_shop_en = pd.read_csv('/kaggle/input/english-converted-datasets/shops.csv')
df_test_en = pd.read_csv('/kaggle/input/english-converted-datasets/test.csv')
df_item = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
df_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
df_item_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
df_sale_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
df_shop = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
df_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
print(df_item.columns)
print(df_submission.columns)
print(df_item_cat.columns)
print(df_sale_train.columns)
print(df_shop.columns)
print(df_test.columns)
print(df_item.dtypes)
print(df_submission.dtypes)
print(df_item_cat.dtypes)
print(df_sale_train.dtypes)
print(df_shop.dtypes)
print(df_test.dtypes) | code |
90156742/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.xlim(2004, 2017)
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
np.random.seed(14)
NUM = 1000
p = 0.5
q = 1 - p
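# Build a synthetic two-class dataset: feat1 is drawn from a different normal distribution per class, feat2 is a noisy function of feat1, and t_cl1 / t_cl2 hold the class labels 0 and 1.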
feat1_cl1 = np.random.normal(10, 1, int(NUM * p))
feat2_cl1 = np.random.normal(4, 2, int(NUM * p)) * 0.2 * feat1_cl1 + np.random.random(int(NUM * p))
t_cl1 = np.array([0] * int(NUM * p))
feat1_cl2 = np.random.normal(3, 2, int(NUM * q))
feat2_cl2 = np.random.normal(-1, 1, int(NUM * q)) * 2 * feat1_cl2 + np.random.random(int(NUM * q))
t_cl2 = np.array([1] * int(NUM * q))
plt.style.use('seaborn')
boston = pd.read_csv('/kaggle/input/others/House_Price.csv')
import seaborn as sns
sns.distplot(boston['price'])
plt.show() | code |
90156742/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.plot(data['year'], data['manager_m'] - data['manager_f'], label='Managers')
plt.plot(data['year'], data['operator_m'] - data['operator_f'], label='Operators')
plt.plot(data['year'], data['sales_m'] - data['sales_f'], label='Sales')
plt.plot(data['year'], np.zeros(len(data['year'])), color='red', linestyle='--')
plt.title('Wage gap over time')
plt.ylabel('Difference in US $')
plt.xlabel('Years')
plt.legend()
plt.xlim(2004, 2017)
plt.show() | code |
90156742/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data | code |
90156742/cell_2 | [
"image_output_1.png"
] | import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data | code |
90156742/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90156742/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
for beruf in new_data.columns:
print(beruf) | code |
90156742/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.xlim(2004, 2017)
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
for beruf in new_data.columns:
plt.bar(beruf, new_data.loc[2012, beruf])
plt.title('Differenz des mittleren Stundenlohns zwischen Mann und Frau 2012')
plt.ylabel('Durchschnittliche Lohndifferenz in $')
plt.show() | code |
90156742/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.xlim(2004, 2017)
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
np.random.seed(14)
NUM = 1000
p = 0.5
q = 1 - p
feat1_cl1 = np.random.normal(10, 1, int(NUM * p))
feat2_cl1 = np.random.normal(4, 2, int(NUM * p)) * 0.2 * feat1_cl1 + np.random.random(int(NUM * p))
t_cl1 = np.array([0] * int(NUM * p))
feat1_cl2 = np.random.normal(3, 2, int(NUM * q))
feat2_cl2 = np.random.normal(-1, 1, int(NUM * q)) * 2 * feat1_cl2 + np.random.random(int(NUM * q))
t_cl2 = np.array([1] * int(NUM * q))
plt.style.use('seaborn')
boston = pd.read_csv('/kaggle/input/others/House_Price.csv')
import seaborn as sns
sns.heatmap(boston.corr(), annot=True)  # 'annot' (not 'annotation') is the keyword that writes the correlation values into the cells
plt.show() | code |
90156742/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.xlim(2004, 2017)
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
np.random.seed(14)
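# build a synthetic two-class dataset: in each class, feature 2 is a noisy function of feature 1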
NUM = 1000
p = 0.5
q = 1 - p
feat1_cl1 = np.random.normal(10, 1, int(NUM * p))
feat2_cl1 = np.random.normal(4, 2, int(NUM * p)) * 0.2 * feat1_cl1 + np.random.random(int(NUM * p))
t_cl1 = np.array([0] * int(NUM * p))
feat1_cl2 = np.random.normal(3, 2, int(NUM * q))
feat2_cl2 = np.random.normal(-1, 1, int(NUM * q)) * 2 * feat1_cl2 + np.random.random(int(NUM * q))
t_cl2 = np.array([1] * int(NUM * q))
plt.scatter(feat1_cl1, feat2_cl1, color='red', label='Klasse 1')
plt.scatter(feat1_cl2, feat2_cl2, color='green', label='Klasse 2')
plt.title('feat1 vs feat2')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.style.use('seaborn')
plt.show() | code |
90156742/cell_12 | [
"image_output_1.png"
] | import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
boston = pd.read_csv('/kaggle/input/others/House_Price.csv')
boston.head() | code |
88092667/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
1004716/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
df1 = df.corr()
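# mask the perfect self-correlations on the diagonal so they do not dominate the heatmap colour scale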
df1 = df1[df1 < 1]
sns.heatmap(df1, annot=True) | code |
1004716/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
KNClassifier = KNeighborsClassifier()
KNClassifier.fit(xtrain, ytrain) | code |
1004716/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import ensemble
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
RMClassifier = ensemble.RandomForestClassifier()
RMClassifier.fit(xtrain, ytrain) | code |
1004716/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.utils import shuffle
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
df1 = df.corr()
df1 = df1[df1 < 1]
df.groupby(by='Type').mean()
sns.countplot(data=df, x='Type') | code |
1004716/cell_11 | [
"text_plain_output_1.png"
] | from sklearn import ensemble
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
RMClassifier = ensemble.RandomForestClassifier()
RMClassifier.fit(xtrain, ytrain)
RMClassifier.score(xtest, ytest)
type(RMClassifier.score(xtest, ytest)) | code |
1004716/cell_19 | [
"text_plain_output_1.png"
] | from sklearn import tree
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(xtrain, ytrain)
clf.score(xtest, ytest) | code |
1004716/cell_18 | [
"text_plain_output_1.png"
] | from sklearn import tree
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(xtrain, ytrain) | code |
1004716/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
print(df['Type'].value_counts().sort_values(ascending=False))
print() | code |
1004716/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/glass.csv')
df.head() | code |
1004716/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
KNClassifier = KNeighborsClassifier()
KNClassifier.fit(xtrain, ytrain)
KNClassifier.score(xtest, ytest) | code |
1004716/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.utils import shuffle
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
df1 = df.corr()
df1 = df1[df1 < 1]
df.groupby(by='Type').mean() | code |
1004716/cell_10 | [
"text_html_output_1.png"
] | from sklearn import ensemble
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('../input/glass.csv')
from sklearn.utils import shuffle
df = shuffle(df)
X = df.ix[:, :-1]
Y = df.ix[:, -1]
xtest = X.ix[:100,]
ytest = Y.ix[:100,]
xtrain = X.ix[100:,]
ytrain = Y.ix[100:,]
RMClassifier = ensemble.RandomForestClassifier()
RMClassifier.fit(xtrain, ytrain)
RMClassifier.score(xtest, ytest) | code |
16150848/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import os
spotify_data = pd.read_csv('../input/data.csv')
print(spotify_data.columns)
print(spotify_data.shape) | code |
16150848/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
spotify_data = pd.read_csv('../input/data.csv')
print(max(spotify_data['Streams']))
print(np.where(spotify_data['Streams'] == max(spotify_data['Streams'])))
print(spotify_data['Track Name'][3145443]) | code |
16150848/cell_2 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
spotify_data = pd.read_csv('../input/data.csv') | code |
16150848/cell_3 | [
"text_html_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import os
spotify_data = pd.read_csv('../input/data.csv')
spotify_data.head() | code |
74058977/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda,Flatten
from keras.models import Model,Sequential
from keras.optimizers import Adam
from keras.models import Model, Sequential
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda, Flatten
from keras.optimizers import Adam
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
def model_1():
model = Sequential()
model.add(Conv2D(60, (3, 3), input_shape=(512, 512, 1), activation='relu'))
model.add(Conv2D(60, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(30, (3, 3), activation='relu'))
model.add(Conv2D(30, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # unit count was missing; 2 units assumed to match the two classes ('messy', 'clean')
model.compile(Adam(lr=0.0002), loss='binary_crossentropy', metrics=['accuracy'])
return model
model = model_1()
print(model.summary()) | code |
74058977/cell_2 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
SIZE = 256
X_test = '../input/test-images'
Y_test = '../input/test-labels'
X_train = '../input/train-images'
Y_train = '../input/train-labels'
X_val = '../input/validation-images'
Y_val = '../input/validation-labels'
def load_dataset():
xtrainlist, xtestlist, xvallist = ([], [], [])
for image in os.listdir(X_train):
path = os.path.join(X_train, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
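        # equalize the grayscale histogram to boost contrast in the training images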
img = cv2.equalizeHist(img)
xtrainlist.append(img)
for image in os.listdir(X_test):
path = os.path.join(X_test, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xtestlist.append(img)
for image in os.listdir(X_val):
path = os.path.join(X_val, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xvallist.append(img)
x_train = np.array(xtrainlist)
x_test = np.array(xtestlist)
x_val = np.array(xvallist)
classes_list = [b'messy', b'clean']
classes = np.array(classes_list)
return (x_train, x_test, x_val, classes)
x_train, x_test, x_val, classes = load_dataset()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_val = np.expand_dims(x_val, axis=3)
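# scale pixel values to the [0, 1] range before feeding them to the network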
x_train, x_test, x_val = (x_train / 255.0, x_test / 255.0, x_val / 255.0)
print('train_set_x_flatten shape: ' + str(x_train.shape))
print('train_set_y shape: ' + str(x_test.shape))
print('test_set_x_flatten shape: ' + str(x_val.shape)) | code |
74058977/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
SIZE = 256
X_test = '../input/test-images'
Y_test = '../input/test-labels'
X_train = '../input/train-images'
Y_train = '../input/train-labels'
X_val = '../input/validation-images'
Y_val = '../input/validation-labels'
def load_dataset():
xtrainlist, xtestlist, xvallist = ([], [], [])
for image in os.listdir(X_train):
path = os.path.join(X_train, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.equalizeHist(img)
xtrainlist.append(img)
for image in os.listdir(X_test):
path = os.path.join(X_test, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xtestlist.append(img)
for image in os.listdir(X_val):
path = os.path.join(X_val, image)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
xvallist.append(img)
x_train = np.array(xtrainlist)
x_test = np.array(xtestlist)
x_val = np.array(xvallist)
classes_list = [b'messy', b'clean']
classes = np.array(classes_list)
return (x_train, x_test, x_val, classes)
x_train, x_test, x_val, classes = load_dataset()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_val = np.expand_dims(x_val, axis=3)
x_train, x_test, x_val = (x_train / 255.0, x_test / 255.0, x_val / 255.0)
plt.imshow(x_train[300], cmap='gray') | code |
33101435/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
nRowsRead = None
df1 = pd.read_csv('/kaggle/input/tennis-20112019/atp.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'atp.csv'
nRow, nCol = df1.shape
print(f'There are {nRow} rows and {nCol} columns') | code |
33101435/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
nRowsRead = None
df1 = pd.read_csv('/kaggle/input/tennis-20112019/atp.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'atp.csv'
nRow, nCol = df1.shape
plotPerColumnDistribution(df1, 10, 5) | code |
33101435/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33101435/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
nRowsRead = None
df1 = pd.read_csv('/kaggle/input/tennis-20112019/atp.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'atp.csv'
nRow, nCol = df1.shape
df1.head(5) | code |
90130223/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581
data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
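# recode the target column: edible ('e') -> 1, poisonous ('p') -> 0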
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
sum = 0
for n in data.nunique():
sum += n
sum = sum - data.shape[1]
print(sum) | code |
90130223/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581
data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.info() | code |
90130223/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581
data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
sum = 0
for n in data.nunique():
sum += n
sum = sum - data.shape[1]
data = pd.get_dummies(data, drop_first=True)
data.rename({'edible_1': 'edible'}, axis=1, inplace=True)
data.head() | code |
90130223/cell_26 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581
data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
mapping = {'e': 1, 'p': 0}
data.rename({'class': 'edible'}, axis=1, inplace=True)
data['edible'] = data['edible'].replace(mapping)
data = data.astype('category')
data.dtypes
sum = 0
for n in data.nunique():
sum += n
sum = sum - data.shape[1]
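# one-hot encode every categorical column; drop_first removes one redundant level per feature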
data = pd.get_dummies(data, drop_first=True)
data.rename({'edible_1': 'edible'}, axis=1, inplace=True)
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
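# hold out 25% of the rows for evaluation; the fixed seed keeps the split reproducible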
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=SEED)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape) | code |
90130223/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 581
data = pd.read_csv('/kaggle/input/mushroom-classification/mushrooms.csv')
data.isna().any()
data.describe() | code |
90130223/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |