path stringlengths 13-17 | screenshot_names sequencelengths 1-873 | code stringlengths 0-40.4k | cell_type stringclasses 1 value |
---|---|---|---|
105210810/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
file_path = '../input/car-price-prediction/CarPrice_Assignment.csv'
cars = pd.read_csv(file_path, index_col='car_ID')
cars.shape
cars.columns
cars.isnull().sum()
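# Extract the manufacturer from the first token of CarName, then drop the original column.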
CompanyName = cars['CarName'].apply(lambda x: x.split(' ')[0])
cars.insert(2, 'CompanyName', CompanyName)
cars.drop(['CarName'], axis=1, inplace=True)
cars.CompanyName.unique()
cars.CompanyName = cars.CompanyName.str.lower()
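# Normalise misspelled brand names so each manufacturer maps to a single label.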
cars.CompanyName.replace('maxda', 'mazda', inplace=True)
cars.CompanyName.replace('porcshce', 'porsche', inplace=True)
cars.CompanyName.replace('toyouta', 'toyota', inplace=True)
cars.CompanyName.replace('vokswagen', 'volkswagen', inplace=True)
cars.CompanyName.replace('vw', 'volkswagen', inplace=True)
cars.CompanyName.unique() | code |
105210810/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
file_path = '../input/car-price-prediction/CarPrice_Assignment.csv'
cars = pd.read_csv(file_path, index_col='car_ID')
cars.shape
cars.columns
cars.isnull().sum()
CompanyName = cars['CarName'].apply(lambda x: x.split(' ')[0])
cars.insert(2, 'CompanyName', CompanyName)
cars.drop(['CarName'], axis=1, inplace=True)
cars.CompanyName.unique()
cars.CompanyName = cars.CompanyName.str.lower()
cars.CompanyName.replace('maxda', 'mazda', inplace=True)
cars.CompanyName.replace('porcshce', 'porsche', inplace=True)
cars.CompanyName.replace('toyouta', 'toyota', inplace=True)
cars.CompanyName.replace('vokswagen', 'volkswagen', inplace=True)
cars.CompanyName.replace('vw', 'volkswagen', inplace=True)
cars.CompanyName.unique()
cars.loc[cars.duplicated()] | code |
105210810/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
file_path = '../input/car-price-prediction/CarPrice_Assignment.csv'
cars = pd.read_csv(file_path, index_col='car_ID')
cars.shape
cars.columns
cars.isnull().sum()
CompanyName = cars['CarName'].apply(lambda x: x.split(' ')[0])
cars.insert(2, 'CompanyName', CompanyName)
cars.drop(['CarName'], axis=1, inplace=True)
cars.CompanyName.unique() | code |
105210810/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
file_path = '../input/car-price-prediction/CarPrice_Assignment.csv'
cars = pd.read_csv(file_path, index_col='car_ID')
cars.head() | code |
48166353/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import xml.etree.ElementTree as ET
from PIL import Image
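# Convert every image to RGB: drop the alpha channel from RGBA files and rewrite any other mode in place.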
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
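# Indices 189-195 and 203-207 are skipped (presumably unusable files), leaving 200 images.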
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
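# Preallocate a (200, 300, 400, 3) array; images whose shape does not match are
# resized to 400x300 with PIL and zero-padded into their slot.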
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
import xml.etree.ElementTree as ET
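# Build a (200, 4) multi-label target from the Pascal VOC-style XML annotations:
# one column per fruit class, set to 1 for every object name found in the file.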
output = np.zeros((200, 4))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
tree = ET.parse('/kaggle/input/fruit-detection/annotations/fruit{}.xml'.format(i))
root = tree.getroot()
for elem in root:
for subelem in elem:
if subelem.tag == 'name':
if subelem.text == 'snake fruit':
output[q, 0] = 1
elif subelem.text == 'dragon fruit':
output[q, 1] = 1
elif subelem.text == 'banana':
output[q, 2] = 1
elif subelem.text == 'pineapple':
output[q, 3] = 1
q = q + 1
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ims, output, test_size=0.3, random_state=0)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
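# Four Conv/MaxPool/Dropout stages feed two dense layers; the output uses sigmoid
# with binary_crossentropy because an image can contain several fruit classes.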
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(300, 400, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=64) | code |
48166353/cell_6 | [
"text_plain_output_1.png"
] | from PIL import Image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import xml.etree.ElementTree as ET
from PIL import Image
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
import xml.etree.ElementTree as ET
output = np.zeros((200, 4))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
tree = ET.parse('/kaggle/input/fruit-detection/annotations/fruit{}.xml'.format(i))
root = tree.getroot()
for elem in root:
for subelem in elem:
if subelem.tag == 'name':
if subelem.text == 'snake fruit':
output[q, 0] = 1
elif subelem.text == 'dragon fruit':
output[q, 1] = 1
elif subelem.text == 'banana':
output[q, 2] = 1
elif subelem.text == 'pineapple':
output[q, 3] = 1
q = q + 1
from keras.utils import to_categorical
print(ims.shape)
print(output.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ims, output, test_size=0.3, random_state=0)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(y_train[0])
print(y_train.shape)
print(y_test.shape) | code |
48166353/cell_11 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing import image
from lime import lime_image
from skimage.segmentation import mark_boundaries
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import xml.etree.ElementTree as ET
from PIL import Image
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
import xml.etree.ElementTree as ET
output = np.zeros((200, 4))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
tree = ET.parse('/kaggle/input/fruit-detection/annotations/fruit{}.xml'.format(i))
root = tree.getroot()
for elem in root:
for subelem in elem:
if subelem.tag == 'name':
if subelem.text == 'snake fruit':
output[q, 0] = 1
elif subelem.text == 'dragon fruit':
output[q, 1] = 1
elif subelem.text == 'banana':
output[q, 2] = 1
elif subelem.text == 'pineapple':
output[q, 3] = 1
q = q + 1
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ims, output, test_size=0.3, random_state=0)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(300, 400, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=64)
scores = model.evaluate(x_test, y_test, verbose=0)
predictions = model.predict(x_test[0:4, :, :, :])
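# Explain a single image with LIME: perturb superpixels, fit a local surrogate on
# model.predict outputs, and highlight the most influential regions.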
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
from skimage.segmentation import mark_boundaries
image_example = ims[2]
explainer = lime_image.LimeImageExplainer(verbose=False)
explanation = explainer.explain_instance(image_example, classifier_fn=model.predict, top_labels=100, hide_color=0, num_samples=1000)
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=5, hide_rest=False)
plt.imshow(mark_boundaries(temp, mask)) | code |
48166353/cell_1 | [
"text_plain_output_1.png"
] | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
#!pip install image-classifiers==0.2.2
!pip install keras_sequential_ascii
#!pip install keras_applications
#!pip install plot_utils
import sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session | code |
48166353/cell_8 | [
"text_plain_output_2.png",
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(300, 400, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='sigmoid'))
model.summary() | code |
48166353/cell_3 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
from PIL import Image
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
print(q)
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
print(q) | code |
48166353/cell_10 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import xml.etree.ElementTree as ET
from PIL import Image
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
import xml.etree.ElementTree as ET
output = np.zeros((200, 4))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
tree = ET.parse('/kaggle/input/fruit-detection/annotations/fruit{}.xml'.format(i))
root = tree.getroot()
for elem in root:
for subelem in elem:
if subelem.tag == 'name':
if subelem.text == 'snake fruit':
output[q, 0] = 1
elif subelem.text == 'dragon fruit':
output[q, 1] = 1
elif subelem.text == 'banana':
output[q, 2] = 1
elif subelem.text == 'pineapple':
output[q, 3] = 1
q = q + 1
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ims, output, test_size=0.3, random_state=0)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(300, 400, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=64)
scores = model.evaluate(x_test, y_test, verbose=0)
print('Accuracy: %.2f%%' % (scores[1] * 100))
predictions = model.predict(x_test[0:4, :, :, :])
print(predictions)
print(y_test[0:4, :]) | code |
48166353/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing import image
from lime import lime_image
from skimage.segmentation import mark_boundaries
from sklearn.model_selection import train_test_split
import eli5
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import xml.etree.ElementTree as ET
from PIL import Image
def change_image_channels(image, image_path):
if image.mode == 'RGBA':
r, g, b, a = image.split()
image = Image.merge('RGB', (r, g, b))
image.save(image_path)
elif image.mode != 'RGB':
image = image.convert('RGB')
os.remove(image_path)
image.save(image_path)
return image
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
change_image_channels(im, 'fruit_change{}.png'.format(i))
import matplotlib.pyplot as plt
ims = np.zeros((200, 300, 400, 3))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
try:
im = plt.imread('fruit_change{}.png'.format(i))
ims[q, :, :, :] = im
q = q + 1
except:
try:
im = Image.open('fruit_change{}.png'.format(i))
except:
im = Image.open('/kaggle/input/fruit-detection/images/fruit{}.png'.format(i))
imBackground = im.resize((400, 300))
imBackground.save('ProcessedImage{}.png'.format(i), 'png')
im = plt.imread('ProcessedImage{}.png'.format(i))
size = im.shape
x = size[0]
y = size[1]
channels = size[2]
ims[q, 0:x, 0:y, 0:channels] = im
q = q + 1
import xml.etree.ElementTree as ET
output = np.zeros((200, 4))
q = 0
for i in range(212):
if i >= 189 and i <= 195 or (i >= 203 and i <= 207):
continue
tree = ET.parse('/kaggle/input/fruit-detection/annotations/fruit{}.xml'.format(i))
root = tree.getroot()
for elem in root:
for subelem in elem:
if subelem.tag == 'name':
if subelem.text == 'snake fruit':
output[q, 0] = 1
elif subelem.text == 'dragon fruit':
output[q, 1] = 1
elif subelem.text == 'banana':
output[q, 2] = 1
elif subelem.text == 'pineapple':
output[q, 3] = 1
q = q + 1
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ims, output, test_size=0.3, random_state=0)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(300, 400, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=64)
scores = model.evaluate(x_test, y_test, verbose=0)
predictions = model.predict(x_test[0:4, :, :, :])
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
from skimage.segmentation import mark_boundaries
image_example = ims[2]
explainer = lime_image.LimeImageExplainer(verbose=False)
explanation = explainer.explain_instance(image_example, classifier_fn=model.predict, top_labels=100, hide_color=0, num_samples=1000)
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=5, hide_rest=False)
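# eli5.show_prediction renders a Grad-CAM style heatmap for Keras image models and
# expects a batch dimension, hence the expand_dims below.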
import eli5
image_example = np.expand_dims(ims[2], axis=0)
eli5.show_prediction(model, image_example) | code |
2017107/cell_9 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position_'] = pd.Categorical(frame['position'], categories=unique_cat).codes
frame['salary_'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True).codes
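# Embed a 3000-row sample in 2-D: one-hot encode, drop near-constant dummy columns
# with VarianceThreshold, scale, then apply metric MDS.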
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
### Clustering with kmeans ###
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
def plot_pc (label, frame):
pc_1 = frame[frame['label']==label].loc[:,'pc1']
pc_2 = frame[frame['label']==label].loc[:,'pc2']
plt.scatter(pc_1, pc_2, label=label)
plt.legend()
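# Fit a clustering model for the requested number of clusters and scatter the MDS
# components coloured by cluster label.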
def plot_clusters(function, n_clusters, function_kwargs=None):
if function_kwargs is None:
function_kwargs = dict()
model = function(n_clusters=n_clusters, **function_kwargs)
labels = model.fit_predict(frame_)
results = pd.DataFrame({'label':labels, 'pc1':pc_1, 'pc2':pc_2})
for i in range(n_clusters):
plot_pc(i, results)
plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(15,13))
for i, ax in zip(range(7,11), fig.axes):
plt.subplot(ax)
plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'});
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(15,13))
for i, ax in zip(range(7,11), fig.axes):
plt.subplot(ax)
plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward',
'affinity':'euclidean'})
n_clusters = 10
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', affinity='euclidean')
model.fit(frame_)
labels = model.labels_
results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
plt.figure(figsize=(10, 8))
for i in range(n_clusters):
plot_pc(i, results) | code |
2017107/cell_6 | [
"image_output_1.png"
] | from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position_'] = pd.Categorical(frame['position'], categories=unique_cat).codes
frame['salary_'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True).codes
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
print('{} features used'.format(frame_.shape[1]))
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
plt.figure(figsize=(10, 8))
plt.scatter(pc_1, pc_2, color='green')
plt.xlabel('pc 1')
plt.ylabel('pc 2') | code |
2017107/cell_2 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
frame.iloc[:, 5:].head() | code |
2017107/cell_1 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
frame.iloc[:, :5].head() | code |
2017107/cell_7 | [
"image_output_1.png"
] | from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position_'] = pd.Categorical(frame['position'], categories=unique_cat).codes
frame['salary_'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True).codes
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
def plot_pc(label, frame):
pc_1 = frame[frame['label'] == label].loc[:, 'pc1']
pc_2 = frame[frame['label'] == label].loc[:, 'pc2']
plt.scatter(pc_1, pc_2, label=label)
plt.legend()
def plot_clusters(function, n_clusters, function_kwargs=None):
if function_kwargs is None:
function_kwargs = dict()
model = function(n_clusters=n_clusters, **function_kwargs)
labels = model.fit_predict(frame_)
results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
for i in range(n_clusters):
plot_pc(i, results)
plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(7, 11), fig.axes):
plt.subplot(ax)
plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'}) | code |
2017107/cell_8 | [
"image_output_1.png"
] | from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position_'] = pd.Categorical(frame['position'], categories=unique_cat).codes
frame['salary_'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True).codes
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
### Clustering with kmeans ###
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
def plot_pc (label, frame):
pc_1 = frame[frame['label']==label].loc[:,'pc1']
pc_2 = frame[frame['label']==label].loc[:,'pc2']
plt.scatter(pc_1, pc_2, label=label)
plt.legend()
def plot_clusters(function, n_clusters, function_kwargs=None):
if function_kwargs is None:
function_kwargs = dict()
model = function(n_clusters=n_clusters, **function_kwargs)
labels = model.fit_predict(frame_)
results = pd.DataFrame({'label':labels, 'pc1':pc_1, 'pc2':pc_2})
for i in range(n_clusters):
plot_pc(i, results)
plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(15,13))
for i, ax in zip(range(7,11), fig.axes):
plt.subplot(ax)
plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'});
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(7, 11), fig.axes):
plt.subplot(ax)
plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward', 'affinity': 'euclidean'}) | code |
2017107/cell_3 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False) | code |
2017107/cell_10 | [
"text_html_output_1.png"
] | from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position_'] = pd.Categorical(frame['position'], categories=unique_cat).codes
frame['salary_'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True).codes
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
### Clustering with kmeans ###
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
def plot_pc (label, frame):
pc_1 = frame[frame['label']==label].loc[:,'pc1']
pc_2 = frame[frame['label']==label].loc[:,'pc2']
plt.scatter(pc_1, pc_2, label=label)
plt.legend()
def plot_clusters(function, n_clusters, function_kwargs=None):
if function_kwargs is None:
function_kwargs = dict()
model = function(n_clusters=n_clusters, **function_kwargs)
labels = model.fit_predict(frame_)
results = pd.DataFrame({'label':labels, 'pc1':pc_1, 'pc2':pc_2})
for i in range(n_clusters):
plot_pc(i, results)
plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(15,13))
for i, ax in zip(range(7,11), fig.axes):
plt.subplot(ax)
plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'});
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(15,13))
for i, ax in zip(range(7,11), fig.axes):
plt.subplot(ax)
plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward',
'affinity':'euclidean'})
n_clusters = 10
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', affinity='euclidean')
model.fit(frame_)
labels = model.labels_
results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
label1 = 5
label0 = 1
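# Compare feature distributions between two hand-picked clusters (labels 5 and 1)
# using plot_dist, which is defined in the cell_5 row above.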
frame = frame.loc[sampled_index]
frame['labels'] = labels
label_0 = frame[frame['labels'] == label0].drop(['position', 'salary'], axis=1)
label_1 = frame[frame['labels'] == label1].drop(['position', 'salary'], axis=1)
for name in label_1.columns:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))
plt.subplot(ax1)
plot_dist(name, label_1)
plt.title(label1)
plt.subplot(ax2)
plot_dist(name, label_0, color='red')
plt.title(label0) | code |
2017107/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import seaborn as sns
import matplotlib.pyplot as plt
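# plot_dist prefers the encoded '<name>_' column when present and chooses a
# distplot or countplot based on the number of distinct values.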
def plot_dist(name, frame, color='green'):
name0 = '{}_'.format(name)
if name0 not in frame.columns:
name0 = name
data_count = len(frame[name0].unique())
if data_count > 3:
sns.distplot(frame[name0], rug=False, color=color)
if data_count < 10:
plt.xticks(frame[name0].unique())
else:
sns.countplot(frame[name0], color=color)
name1 = ''.join([char.upper() if i == 0 else char for i, char in enumerate(list(name))])
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(nrows=3, ncols=3, figsize=(12, 10))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
cols_to_plot = list(frame.columns)
cols_to_plot.remove('left')
for ax, name in zip(fig.axes, cols_to_plot):
plt.subplot(ax)
plot_dist(name, frame)
sns.despine() | code |
128007514/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/list-of-world-cities-by-population-density/List of world cities by population density.csv')
def preprocess_inputs(df):
df = df.copy()
drop_cols = ['Unnamed: 0']
df = df.drop(drop_cols, axis=1)
df['Area']  # NOTE: the original `df[Area]` raised a NameError; the quoted column name is an assumed fix
return df
X = preprocess_inputs(data)
X.head() | code |
128007514/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/list-of-world-cities-by-population-density/List of world cities by population density.csv')
data.head() | code |
18119291/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
cat_data.head() | code |
18119291/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()] | code |
18119291/cell_25 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
print(train.OverallQual.unique()) | code |
18119291/cell_4 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
18119291/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
numeric_data.drop('Id', axis=1, inplace=True)
plt.figure(figsize=(20, 20))
corr = numeric_data.corr()
sns.heatmap(corr, annot=True) | code |
18119291/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
numeric_data.head()
numeric_data.drop('Id', axis=1, inplace=True) | code |
18119291/cell_29 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
cat_data.describe() | code |
18119291/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
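# Median SalePrice per OverallQual level, visualising the quality-price relationship.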
pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc='median')
pivot.plot(kind='bar') | code |
18119291/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing | code |
18119291/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
print(os.listdir('../input')) | code |
18119291/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info() | code |
18119291/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
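# Log-transforming SalePrice pulls the right-skewed target towards normality.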
target = np.log(train['SalePrice'])
print(target.skew())
sns.distplot(target) | code |
18119291/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc='median')
train.GarageCars.unique()
pivot = train.pivot_table(index='GarageCars', values='SalePrice', aggfunc='median')
pivot.plot(kind='bar') | code |
18119291/cell_8 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum() | code |
18119291/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
sns.distplot(train['SalePrice']) | code |
18119291/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
print('The skewness of SalePrice is {}'.format(train['SalePrice'].skew())) | code |
18119291/cell_31 | [
"text_plain_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
numeric_data.drop('Id', axis=1, inplace=True)
corr = numeric_data.corr()
pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc='median')
train.GarageCars.unique()
pivot = train.pivot_table(index='GarageCars', values='SalePrice', aggfunc='median')
cat = [f for f in train.columns if train.dtypes[f] == 'object']
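# One-way ANOVA per categorical feature: group SalePrice by category level and test
# whether group means differ; a small p-value marks an informative feature.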
def anova(frame):
anv = pd.DataFrame()
anv['features'] = cat
pvals = []
for c in cat:
samples = []
for cls in frame[c].unique():
s = frame[frame[c] == cls]['SalePrice'].values
samples.append(s)
pval = stats.f_oneway(*samples)[1]
pvals.append(pval)
anv['pval'] = pvals
return anv.sort_values('pval')
cat_data['SalePrice'] = train.SalePrice.values
k = anova(cat_data)
k['disparity'] = np.log(1.0 / k['pval'].values)
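# log(1/p) turns small p-values into large 'disparity' scores for the bar plot.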
sns.barplot(data=k, x='features', y='disparity')
plt.xticks(rotation=90)
plt.show()
18119291/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
numeric_data.drop('Id', axis=1, inplace=True)
corr = numeric_data.corr()
print(corr['SalePrice'].sort_values(ascending=False)) | code |
18119291/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
sns.barplot(x='name', y='count', data=missing)
plt.xticks(rotation=90) | code |
18119291/cell_27 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().any().sum()
train.columns[train.isnull().any()]
missing = train.isnull().sum() / len(train)
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing
missing = pd.DataFrame(missing)
missing.columns = ['count']
missing.index.names = ['name']
missing['name'] = missing.index
plt.xticks(rotation=90)
target = np.log(train['SalePrice'])
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
numeric_data.drop('Id', axis=1, inplace=True)
corr = numeric_data.corr()
pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc='median')
sns.jointplot(x=train['GrLivArea'], y=train['SalePrice'])
sns.jointplot(x=train['OverallQual'], y=train['SalePrice']) | code |
18119291/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.shape)
print(test.shape) | code |
105187475/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.describe() | code |
105187475/cell_25 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any()
df.isnull().sum()
plt.xticks(fontsize=14)
for column in df.columns:
print(column, '=>', df[column].nunique()) | code |
105187475/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any()
df.isnull().sum() | code |
105187475/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.head() | code |
105187475/cell_29 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any()
df.isnull().sum()
plt.xticks(fontsize=14)
categoricalFeature = [feature for feature in df.columns if df[feature].dtype == 'O']
categoricalFeature | code |
105187475/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any() | code |
105187475/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105187475/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape | code |
105187475/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull() | code |
105187475/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any()
df.isnull().sum()
plt.xticks(fontsize=14)
df.info() | code |
105187475/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.info() | code |
105187475/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.info() | code |
105187475/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes
df.isnull()
df.isnull().any()
df.isnull().sum()
plt.figure(figsize=(12, 10))
sns.heatmap(df.isnull(), cmap='viridis')
plt.xticks(fontsize=14)
plt.title('Count of Missing Values using heatmap')
plt.show() | code |
105187475/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/concretecsv/concrete.csv')
df.shape
df.dtypes | code |
1008613/cell_13 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
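# Each row is a flattened 28x28 grayscale digit; restore the 2-D shape for plotting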
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
plt.figure(figsize=(11, 6))
for i in range(66):
plt.subplot(6, 11, i + 1)
plt.imshow(x_train[i])
plt.xticks([])
plt.yticks([])
plt.tight_layout() | code |
1008613/cell_9 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
print('Amount of training data:', num_training, 'pairs of images and labels.')
print('Amount of testing data:', num_testing, 'images.') | code |
1008613/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
for i in range(66):
plt.xticks([])
plt.yticks([])
plt.tight_layout()
rawx = train.values[:1000]
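# t-SNE on the first 1000 digits; standardize the pixel features first so no single feature dominates the distances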
from sklearn.preprocessing import StandardScaler
xscaled = StandardScaler().fit_transform(rawx)
from sklearn.manifold import TSNE
tsne = TSNE()
vis = tsne.fit_transform(xscaled)
vis = [{'X': vis[i][0], 'Y': vis[i][1], 'K': y_train[i]} for i in range(len(vis))]
sns.FacetGrid(pd.DataFrame.from_dict(vis), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
from sklearn.decomposition import PCA
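# Project the same standardized sample onto 5 principal components and scatter the first two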
pca = PCA(n_components=5)
pca_ani = pca.fit_transform(xscaled)
xpca = pca_ani[:, 0]
ypca = pca_ani[:, 1]
vispca = [{'X': xpca[i], 'Y': ypca[i], 'K': y_train[i]} for i in range(len(xpca))]
sns.FacetGrid(pd.DataFrame.from_dict(vispca), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
x_train = x_train / 255.0
x_test = x_test / 255.0
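# Scale pixel values to [0, 1] and one-hot encode the digit labels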
from sklearn.preprocessing import LabelBinarizer
y_train_hot = LabelBinarizer().fit_transform(y_train)
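# LeNet-style CNN: three conv/ReLU/max-pool blocks, then 120- and 84-unit
# dense layers and a 10-way softmax output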
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(28, 28, 1), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Dropout(0.12))
model.add(Conv2D(16, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(35, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(120, use_bias=True))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(84, use_bias=True))
model.add(Activation('relu'))
model.add(Dense(10, use_bias=True))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=0.001, decay=1), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)
training_hist = model.fit(x_train, y_train_hot, epochs=16, batch_size=64, verbose=2, validation_split=0.23)
hdf = pd.DataFrame.from_dict(training_hist.history)
hdf['epochs'] = list(range(16))
sns.FacetGrid(hdf, size=6).map(sns.pointplot, 'epochs', 'val_acc', color='y').map(sns.pointplot, 'epochs', 'acc', color='r').set(xlabel='Epochs', ylabel='Validation Accuracy (Yellow) and Training Accuracy (Red)')
sns.FacetGrid(hdf, size=6).map(sns.pointplot, 'epochs', 'loss', color='g').map(sns.pointplot, 'epochs', 'val_loss', color='m').set(xlabel='Epochs', ylabel='Validation Loss (Magenta) and Training Loss (Green)') | code |
1008613/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from subprocess import check_output
print('Files in Input Directory:')
print(check_output(['ls', '../input']).decode('utf8')) | code |
1008613/cell_19 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
for i in range(66):
plt.xticks([])
plt.yticks([])
plt.tight_layout()
rawx = train.values[:1000]
from sklearn.preprocessing import StandardScaler
xscaled = StandardScaler().fit_transform(rawx)
from sklearn.manifold import TSNE
tsne = TSNE()
vis = tsne.fit_transform(xscaled)
vis = [{'X': vis[i][0], 'Y': vis[i][1], 'K': y_train[i]} for i in range(len(vis))]
sns.FacetGrid(pd.DataFrame.from_dict(vis), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
pca_ani = pca.fit_transform(xscaled)
xpca = pca_ani[:, 0]
ypca = pca_ani[:, 1]
vispca = [{'X': xpca[i], 'Y': ypca[i], 'K': y_train[i]} for i in range(len(xpca))]
sns.FacetGrid(pd.DataFrame.from_dict(vispca), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend() | code |
1008613/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head() | code |
1008613/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
for i in range(66):
plt.xticks([])
plt.yticks([])
plt.tight_layout()
rawx = train.values[:1000]
from sklearn.preprocessing import StandardScaler
xscaled = StandardScaler().fit_transform(rawx)
from sklearn.manifold import TSNE
tsne = TSNE()
vis = tsne.fit_transform(xscaled)
vis = [{'X': vis[i][0], 'Y': vis[i][1], 'K': y_train[i]} for i in range(len(vis))]
sns.FacetGrid(pd.DataFrame.from_dict(vis), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
pca_ani = pca.fit_transform(xscaled)
xpca = pca_ani[:, 0]
ypca = pca_ani[:, 1]
vispca = [{'X': xpca[i], 'Y': ypca[i], 'K': y_train[i]} for i in range(len(xpca))]
sns.FacetGrid(pd.DataFrame.from_dict(vispca), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
x_train = x_train / 255.0
x_test = x_test / 255.0
from sklearn.preprocessing import LabelBinarizer
y_train_hot = LabelBinarizer().fit_transform(y_train)
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(28, 28, 1), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Dropout(0.12))
model.add(Conv2D(16, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(35, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(120, use_bias=True))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(84, use_bias=True))
model.add(Activation('relu'))
model.add(Dense(10, use_bias=True))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=0.001, decay=1), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)
training_hist = model.fit(x_train, y_train_hot, epochs=16, batch_size=64, verbose=2, validation_split=0.23)
hdf = pd.DataFrame.from_dict(training_hist.history)
hdf['epochs'] = list(range(16))
sns.FacetGrid(hdf, size=6).map(sns.pointplot, 'epochs', 'val_acc', color='y').map(sns.pointplot, 'epochs', 'acc', color='r').set(xlabel='Epochs', ylabel='Validation Accuracy (Yellow) and Training Accuracy (Red)') | code |
1008613/cell_28 | [
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.preprocessing import LabelBinarizer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
x_train = x_train / 255.0
x_test = x_test / 255.0
from sklearn.preprocessing import LabelBinarizer
y_train_hot = LabelBinarizer().fit_transform(y_train)
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(28, 28, 1), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Dropout(0.12))
model.add(Conv2D(16, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(35, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(120, use_bias=True))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(84, use_bias=True))
model.add(Activation('relu'))
model.add(Dense(10, use_bias=True))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=0.001, decay=1), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)
training_hist = model.fit(x_train, y_train_hot, epochs=16, batch_size=64, verbose=2, validation_split=0.23) | code |
1008613/cell_16 | [
"text_html_output_1.png"
] | from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
for i in range(66):
plt.xticks([])
plt.yticks([])
plt.tight_layout()
rawx = train.values[:1000]
from sklearn.preprocessing import StandardScaler
xscaled = StandardScaler().fit_transform(rawx)
from sklearn.manifold import TSNE
tsne = TSNE()
vis = tsne.fit_transform(xscaled)
vis = [{'X': vis[i][0], 'Y': vis[i][1], 'K': y_train[i]} for i in range(len(vis))]
sns.FacetGrid(pd.DataFrame.from_dict(vis), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend() | code |
1008613/cell_37 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_training = len(train.values)
num_testing = len(test.values)
y_train = np.array(train.pop('label').values)
x_train = np.array(train.values)
x_test = np.array(test.values)
x_train = x_train.reshape(num_training, 28, 28)
x_test = x_test.reshape(num_testing, 28, 28)
for i in range(66):
plt.xticks([])
plt.yticks([])
plt.tight_layout()
rawx = train.values[:1000]
from sklearn.preprocessing import StandardScaler
xscaled = StandardScaler().fit_transform(rawx)
from sklearn.manifold import TSNE
tsne = TSNE()
vis = tsne.fit_transform(xscaled)
vis = [{'X': vis[i][0], 'Y': vis[i][1], 'K': y_train[i]} for i in range(len(vis))]
sns.FacetGrid(pd.DataFrame.from_dict(vis), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
pca_ani = pca.fit_transform(xscaled)
xpca = pca_ani[:, 0]
ypca = pca_ani[:, 1]
vispca = [{'X': xpca[i], 'Y': ypca[i], 'K': y_train[i]} for i in range(len(xpca))]
sns.FacetGrid(pd.DataFrame.from_dict(vispca), hue='K', size=8).map(plt.scatter, 'X', 'Y').add_legend()
x_train = x_train / 255.0
x_test = x_test / 255.0
from sklearn.preprocessing import LabelBinarizer
y_train_hot = LabelBinarizer().fit_transform(y_train)
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(28, 28, 1), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Dropout(0.12))
model.add(Conv2D(16, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(35, (5, 5), use_bias=True, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(120, use_bias=True))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(84, use_bias=True))
model.add(Activation('relu'))
model.add(Dense(10, use_bias=True))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=0.001, decay=1), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)
training_hist = model.fit(x_train, y_train_hot, epochs=16, batch_size=64, verbose=2, validation_split=0.23)
hdf = pd.DataFrame.from_dict(training_hist.history)
hdf['epochs'] = list(range(16))
pred = model.predict_classes(x_test, verbose=2)
sub_df = pd.DataFrame()
sub_df['ImageId'] = list(range(1, num_testing + 1))
sub_df['Label'] = pred
print('Amount of test points:', num_testing)
print('Amount of predictions:', len(pred))
sub_df.head() | code |
1008613/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
33102252/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
Ydata.corr()
Ydata[(Ydata['likes'] > 500000) & (Ydata['dislikes'] > 500000)] | code |
18127655/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
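# One crosstab per categorical column, row-normalized so each user's counts become frequencies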
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
pd.crosstab(data.USER_ID, data['PAGE']).head() | code |
18127655/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape
X_test.iloc[0, 0:1725].sum()
data = data[data.FEC_EVENT.dt.month < 10]
X_train = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_train.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_train = pd.concat(X_train, axis=1)
features = list(set(X_train.columns).intersection(set(X_test.columns)))
X_train = X_train[features]
X_test = X_test[features]
y_prev = pd.read_csv('../input/conversiones/conversiones.csv')
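# Label 1 for users with a conversion in month 10 or later; all other users stay 0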
y_train = pd.Series(0, index=X_train.index)
idx = set(y_prev[y_prev.mes >= 10].USER_ID.unique()).intersection(set(X_train.index))
y_train.loc[list(idx)] = 1
y_train.head(23) | code |
18127655/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
from sklearn import model_selection
from lightgbm import LGBMClassifier
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape
X_test.iloc[0, 0:1725].sum()
data = data[data.FEC_EVENT.dt.month < 10]
X_train = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_train.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_train = pd.concat(X_train, axis=1)
features = list(set(X_train.columns).intersection(set(X_test.columns)))
X_train = X_train[features]
X_test = X_test[features]
y_prev = pd.read_csv('../input/conversiones/conversiones.csv')
y_train = pd.Series(0, index=X_train.index)
idx = set(y_prev[y_prev.mes >= 10].USER_ID.unique()).intersection(set(X_train.index))
y_train.loc[list(idx)] = 1
fi = []
test_probs = []
i = 0
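# 10-fold CV: each fold trains a LightGBM with early stopping on its held-out split;
# the test-set probabilities are averaged across the folds afterwards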
for train_idx, valid_idx in model_selection.KFold(n_splits=10, shuffle=True).split(X_train):
i += 1
Xt = X_train.iloc[train_idx]
yt = y_train.loc[X_train.index].iloc[train_idx]
Xv = X_train.iloc[valid_idx]
yv = y_train.loc[X_train.index].iloc[valid_idx]
learner = LGBMClassifier(n_estimators=10000)
learner.fit(Xt, yt, early_stopping_rounds=10, eval_metric='auc', eval_set=[(Xt, yt), (Xv, yv)])
test_probs.append(pd.Series(learner.predict_proba(X_test)[:, -1], index=X_test.index, name='fold_' + str(i)))
fi.append(pd.Series(learner.feature_importances_ / learner.feature_importances_.sum(), index=Xt.columns))
test_probs = pd.concat(test_probs, axis=1).mean(axis=1)
test_probs.index.name = 'USER_ID'
test_probs.name = 'SCORE'
test_probs.to_csv('benchmark.zip', header=True, compression='zip')
fi = pd.concat(fi, axis=1).mean(axis=1) | code |
18127655/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape
X_test.iloc[0, 0:1725].sum()
data = data[data.FEC_EVENT.dt.month < 10]
X_train = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_train.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_train = pd.concat(X_train, axis=1)
features = list(set(X_train.columns).intersection(set(X_test.columns)))
X_train = X_train[features]
X_test = X_test[features]
X_train.head() | code |
18127655/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape
X_test.iloc[0, 0:1725].sum() | code |
18127655/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
data.head() | code |
18127655/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.head() | code |
18127655/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape | code |
18127655/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
from sklearn import model_selection
from lightgbm import LGBMClassifier
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
X_test.shape
X_test.iloc[0, 0:1725].sum()
data = data[data.FEC_EVENT.dt.month < 10]
X_train = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_train.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_train = pd.concat(X_train, axis=1)
features = list(set(X_train.columns).intersection(set(X_test.columns)))
X_train = X_train[features]
X_test = X_test[features]
y_prev = pd.read_csv('../input/conversiones/conversiones.csv')
y_train = pd.Series(0, index=X_train.index)
idx = set(y_prev[y_prev.mes >= 10].USER_ID.unique()).intersection(set(X_train.index))
y_train.loc[list(idx)] = 1
fi = []
test_probs = []
i = 0
for train_idx, valid_idx in model_selection.KFold(n_splits=10, shuffle=True).split(X_train):
i += 1
Xt = X_train.iloc[train_idx]
yt = y_train.loc[X_train.index].iloc[train_idx]
Xv = X_train.iloc[valid_idx]
yv = y_train.loc[X_train.index].iloc[valid_idx]
learner = LGBMClassifier(n_estimators=10000)
learner.fit(Xt, yt, early_stopping_rounds=10, eval_metric='auc', eval_set=[(Xt, yt), (Xv, yv)])
test_probs.append(pd.Series(learner.predict_proba(X_test)[:, -1], index=X_test.index, name='fold_' + str(i)))
fi.append(pd.Series(learner.feature_importances_ / learner.feature_importances_.sum(), index=Xt.columns))
test_probs = pd.concat(test_probs, axis=1).mean(axis=1)
test_probs.index.name = 'USER_ID'
test_probs.name = 'SCORE'
test_probs.to_csv('benchmark.zip', header=True, compression='zip')
fi = pd.concat(fi, axis=1).mean(axis=1)
test_probs | code |
18127655/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1)
data = data[data.FEC_EVENT.dt.month < 10]
X_train = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
print('processing', c)
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_train.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_train = pd.concat(X_train, axis=1) | code |
18127655/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # Data manipulation and analysis, DataFrames, reading CSV files
data = pd.read_csv('../input/pageviews/pageviews.csv', parse_dates=['FEC_EVENT'])
X_test = []
for c in data.drop(['USER_ID', 'FEC_EVENT'], axis=1).columns:
print('processing', c)
temp = pd.crosstab(data.USER_ID, data[c])
temp.columns = [c + '_' + str(v) for v in temp.columns]
X_test.append(temp.apply(lambda x: x / x.sum(), axis=1))
X_test = pd.concat(X_test, axis=1) | code |
90120122/cell_6 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
def show_image_with_filter(image, kernel):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.expand_dims(image, axis=0)
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])
kernel = tf.cast(kernel, dtype=tf.float32)
image_filter = tf.nn.conv2d(input=image, filters=kernel, strides=1, padding='SAME')
image_detect = tf.nn.relu(image_filter)
image_condense = tf.nn.pool(input=image_detect, window_shape=(2, 2), pooling_type='MAX', strides=(2, 2), padding='SAME')
plt.axis('off')
plt.axis('off')
plt.axis('off')
plt.axis('off')
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large', titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
tf.config.run_functions_eagerly(True)
image_path = '../input/computer-vision-resources/car_illus.jpg'
image = tf.io.read_file(image_path)
image = tf.io.decode_jpeg(image, channels=1)
image = tf.image.resize(image, size=[400, 400])
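# 3x3 kernels: an arbitrary filter, Laplacian edge detection, bottom Sobel, emboss, and sharpen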
random_kernel = tf.constant([[1, 2, 3], [-1, -2, -3], [0, 0, 0]])
edge_detect_kernel = tf.constant([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
bottom_sobel_kernel = tf.constant([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
emboss_kernel = tf.constant([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
sharpen_kernel = tf.constant([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
print('\nRANDOM KERNEL:\n')
show_image_with_filter(image, random_kernel)
print('\nEDGE DETECT KERNEL:\n')
show_image_with_filter(image, edge_detect_kernel)
print('\nBOTTOM SOBEL KERNEL:\n')
show_image_with_filter(image, bottom_sobel_kernel)
print('\nEMBOSS KERNEL:\n')
show_image_with_filter(image, emboss_kernel)
print('\nSHARPEN KERNEL:\n')
show_image_with_filter(image, sharpen_kernel) | code |
32068077/cell_24 | [
"text_plain_output_1.png"
] | import cv2
import os
import random
import tensorflow
from imgaug import augmenters as iaa
import numpy as np
import time
import random
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import os
from sklearn.utils.multiclass import unique_labels
import cv2
import matplotlib.pyplot as plt
import seaborn as sn
import tensorflow
layers = tensorflow.keras.layers
BatchNormalization = tensorflow.keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
Flatten = tensorflow.keras.layers.Flatten
MaxPooling2D = tensorflow.keras.layers.MaxPooling2D
Dropout = tensorflow.keras.layers.Dropout
Dense = tensorflow.keras.layers.Dense
ImageDataGenerator = tensorflow.keras.preprocessing.image.ImageDataGenerator
Sequential = tensorflow.keras.Sequential
TensorBoard = tensorflow.keras.callbacks.TensorBoard
ModelCheckpoint = tensorflow.keras.callbacks.ModelCheckpoint
Adam = tensorflow.keras.optimizers.Adam
regularizers = tensorflow.keras.regularizers
categorical_crossentropy = tensorflow.keras.losses
K = tensorflow.keras.backend
plot_model = tensorflow.keras.utils.plot_model
from nltk.corpus import words
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
DATADIR = '../input/lse-alphabet/train_data'
CATEGORIES = ['A_PD', 'A_PF', 'B', 'C', 'CH', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'LL', 'M', 'N', 'Ñ', 'O', 'P', 'Q', 'R', 'RR', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'nothing']
TEST_DIR = '../input/lse-alphabet/test_data'
NUM_CATEGORIES = 32
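# Images are read in BGR, resized to 64x64, converted to RGB, and stored with one imgaug-augmented copy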
def create_training_data(DIR):
"""This function is run for each model in order to get the training data from the filepath
and convert it into array format"""
training_data = []
for category in CATEGORIES:
path = os.path.join(DIR, category)
class_num = CATEGORIES.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
new_array = cv2.resize(img_array, (64, 64))
final_img = cv2.cvtColor(new_array, cv2.COLOR_BGR2RGB)
seq = iaa.Sequential([iaa.Multiply((0.8, 1.2), per_channel=0.2), iaa.Crop(px=(0, 16)), iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5)))])
final_img_aug = seq.augment_images([final_img])
training_data.append([final_img, class_num])
training_data.append([final_img_aug[0], class_num])
except Exception as e:
pass
return training_data
def additional_augmenation(image):
randomValue = random.randint(1, 10)
if randomValue == 9:
aug = iaa.GaussianBlur(sigma=(0, 0.5))
return aug.augment_image(image)
elif randomValue == 8:
aug = iaa.Multiply((0.8, 1.2), per_channel=0.2)
return aug.augment_image(image)
elif randomValue == 7:
aug = iaa.Crop(px=(0, 16))
return aug.augment_image(image)
else:
return image
input_shape = (64, 64, 3)
training_data = create_training_data(DATADIR)
print(len(training_data))
random.shuffle(training_data) | code |
18109621/cell_4 | [
"text_plain_output_1.png"
] | from bq_helper import BigQueryHelper
bq_assistant = BigQueryHelper('patents-public-data', 'worldbank_wdi')
bq_assistant.list_tables()
bq_assistant.head('wdi_2016', num_rows=10) | code |
18109621/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import bq_helper
import bq_helper
from bq_helper import BigQueryHelper
wdi = bq_helper.BigQueryHelper(active_project='patents-public-data', dataset_name='worldbank_wdi') | code |
18109621/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from bq_helper import BigQueryHelper
import pandas as pd
bq_assistant = BigQueryHelper('patents-public-data', 'worldbank_wdi')
bq_assistant.list_tables()
bq_assistant.table_schema('wdi_2016')
import pandas as pd
pd.get_option('max_colwidth')
pd.set_option('max_colwidth', 500)
query1 = '\nSELECT year, country_code,country_name, indicator_code, indicator_name, indicator_value\nFROM `patents-public-data.worldbank_wdi.wdi_2016`\nwhere country_code in ("GRC", "GBR", "FRA", "ITA", "BEL", "CHN","CYP","DEU","EMU","MKD","PRT","TUR","USA", "DNK")\n and year>=2014 \n and indicator_code not like ("per_%") \n and indicator_code not like ("DC.DAC.%") \n and indicator_code not like ("DT.%") \nORDER BY year DESC, indicator_code\n-- LIMIT 200;\n '
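# Dry run: estimate_query_size reports the query's estimated scan size in GB without executing it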
bq_assistant.estimate_query_size(query1) | code |
18109621/cell_3 | [
"text_html_output_1.png"
] | from bq_helper import BigQueryHelper
bq_assistant = BigQueryHelper('patents-public-data', 'worldbank_wdi')
bq_assistant.list_tables() | code |
18109621/cell_14 | [
"text_plain_output_1.png"
] | import bq_helper
import bq_helper
from bq_helper import BigQueryHelper
wdi = bq_helper.BigQueryHelper(active_project='patents-public-data', dataset_name='worldbank_wdi')
query1 = '\nSELECT year, country_code,country_name, indicator_code, indicator_name, indicator_value\nFROM `patents-public-data.worldbank_wdi.wdi_2016`\nwhere country_code in ("GRC", "GBR", "FRA", "ITA", "BEL", "CHN","CYP","DEU","EMU","MKD","PRT","TUR","USA", "DNK")\n and year>=2014 \n and indicator_code not like ("per_%") \n and indicator_code not like ("DC.DAC.%") \n and indicator_code not like ("DT.%") \nORDER BY year DESC, indicator_code\n-- LIMIT 200;\n '
response1 = wdi.query_to_pandas(query1)
base = 'http://test.org.newsmap/wdi-ontology/'
countryCode = base + '#countryCode'
countryName = base + '#countryName'
WDIcode = base + '#WDIcode'
WDIyear = base + '#WDIyear'
WDIname = base + '#WDIname'
WDIdescription = base + '#WDIdescription'
WDIvalue = base + '#WDIvalue'
changeLineStr = 'LLL'
doubleQuotes = '_DQ_'
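# Build one RDF-style triple block per row; 'LLL' and '_DQ_' are placeholders
# for line breaks and double quotes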
response1['tuple'] = ''
response1.tuple = '<' + base + response1.country_code + '_' + response1.indicator_code + '_yearhere> ' + changeLineStr
response1.tuple = response1.tuple + '<' + countryCode + '> ' + doubleQuotes + response1.country_code + doubleQuotes + '; ' + changeLineStr
response1.tuple = response1.tuple + '<' + countryName + '> ' + doubleQuotes + response1.country_name + doubleQuotes + '; ' + changeLineStr
response1.tuple = response1.tuple + '<' + WDIcode + '> ' + doubleQuotes + response1.indicator_code + doubleQuotes + '; ' + changeLineStr
response1.tuple = response1.tuple + '<' + WDIyear + '> ' + 'yearhere; ' + changeLineStr
response1.tuple = response1.tuple + '<' + WDIname + '> ' + doubleQuotes + response1.indicator_name + doubleQuotes + '; ' + changeLineStr
response1.tuple = response1.tuple + '<' + WDIvalue + '> _DQ_valuehere_DQ_^^<http://www.w3.org/2001/XMLSchema#float> . ' + changeLineStr
response1.shape
df = response1.copy()
df[['year', 'indicator_value', 'tuple']].to_excel('kaggle_wdidata.xlsx', index=False)
df = df[(df['country_name'] == 'Greece') & (df['year'] == 2014) & (df['indicator_code'] == 'SL.UEM.TOTL.ZS')]
df = df[['year', 'indicator_value', 'tuple']]
df.head(10) | code |
50211059/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns | code |
50211059/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub | code |
50211059/cell_20 | [
"text_html_output_1.png"
] | from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr()
df1 = df.T
df1
from sklearn.metrics.pairwise import pairwise_distances
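# 1 - Hamming distance between user rows = fraction of feature values two users share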
jac_sim = 1 - pairwise_distances(df1.T, metric='hamming')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim
from sklearn.metrics.pairwise import pairwise_distances
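# Same construction with Euclidean distance; note 1 - Euclidean is not bounded in [0, 1] like the Hamming version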
jac_sim = 1 - pairwise_distances(df1.T, metric='euclidean')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim | code |
50211059/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr() | code |
50211059/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.head(2) | code |
50211059/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50211059/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns | code |
50211059/cell_18 | [
"text_html_output_1.png"
] | from sklearn.metrics.pairwise import pairwise_distances
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr()
df1 = df.T
df1
from sklearn.metrics.pairwise import pairwise_distances
jac_sim = 1 - pairwise_distances(df1.T, metric='hamming')
jac_sim = pd.DataFrame(jac_sim, index=df1.columns, columns=df1.columns)
jac_sim | code |
50211059/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique() | code |
50211059/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr() | code |
50211059/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd
df_sub = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample submission.csv')
df_sub
df_data = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/data.csv')
df_data
df_data.corr()
df_data.columns
df_data.other_interests.unique()
df = df_data
df_uid = df[['user_id']]
df.columns
df.T.corr()
df1 = df.T
df1 | code |
50211059/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_sd = pd.read_csv('/kaggle/input/hacker-earth-love-in-the-screens/sample dataset.csv')
df_sd | code |