path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 distinct value)
88099842/cell_19
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import wfdb

data = '../input/mit-bih-arrhythmia-database/'
patients = ['100', '101', '102', '103', '104', '105', '106', '107', '108', '109',
            '111', '112', '113', '114', '115', '116', '117', '118', '119', '121',
            '122', '123', '124', '200', '201', '202', '203', '205', '207', '208',
            '209', '210', '212', '213', '214', '215', '217', '219', '220', '221',
            '222', '223', '228', '230', '231', '232', '233', '234']
dataframe = pd.DataFrame()
for pts in patients:
    file = data + pts
    annotation = wfdb.rdann(file, 'atr')
    sym = annotation.symbol
    values, counts = np.unique(sym, return_counts=True)
    df_sub = pd.DataFrame({'symbol': values, 'Counts': counts, 'Patient Number': [pts] * len(counts)})
    dataframe = pd.concat([dataframe, df_sub], axis=0)
ax = sns.countplot(dataframe.symbol)
nonbeat = ['[', '!', ']', 'x', '(', ')', 'p', 't', 'u', '`', "'", '^', '|', '~', '+', 's', 'T', '*', 'D', '=', '"', '@', 'Q', '?']
abnormal = ['L', 'R', 'V', '/', 'A', 'f', 'F', 'j', 'a', 'E', 'J', 'e', 'S']
normal = ['N']
dataframe['category'] = -1
dataframe.loc[dataframe.symbol == 'N', 'category'] = 0
dataframe.loc[dataframe.symbol.isin(abnormal), 'category'] = 1
dataframe.groupby('category').Counts.sum()
dataframe = dataframe.loc[~(dataframe['category'] == -1)]
dataframe.groupby('category').Counts.sum()
code
88099842/cell_18
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import wfdb

data = '../input/mit-bih-arrhythmia-database/'
patients = ['100', '101', '102', '103', '104', '105', '106', '107', '108', '109',
            '111', '112', '113', '114', '115', '116', '117', '118', '119', '121',
            '122', '123', '124', '200', '201', '202', '203', '205', '207', '208',
            '209', '210', '212', '213', '214', '215', '217', '219', '220', '221',
            '222', '223', '228', '230', '231', '232', '233', '234']
dataframe = pd.DataFrame()
for pts in patients:
    file = data + pts
    annotation = wfdb.rdann(file, 'atr')
    sym = annotation.symbol
    values, counts = np.unique(sym, return_counts=True)
    df_sub = pd.DataFrame({'symbol': values, 'Counts': counts, 'Patient Number': [pts] * len(counts)})
    dataframe = pd.concat([dataframe, df_sub], axis=0)
ax = sns.countplot(dataframe.symbol)
nonbeat = ['[', '!', ']', 'x', '(', ')', 'p', 't', 'u', '`', "'", '^', '|', '~', '+', 's', 'T', '*', 'D', '=', '"', '@', 'Q', '?']
abnormal = ['L', 'R', 'V', '/', 'A', 'f', 'F', 'j', 'a', 'E', 'J', 'e', 'S']
normal = ['N']
dataframe['category'] = -1
dataframe.loc[dataframe.symbol == 'N', 'category'] = 0
dataframe.loc[dataframe.symbol.isin(abnormal), 'category'] = 1
dataframe.groupby('category').Counts.sum()
code
88099842/cell_8
[ "image_output_2.png", "image_output_1.png" ]
import os

print(os.listdir('../input'))
code
88099842/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import wfdb

data = '../input/mit-bih-arrhythmia-database/'
patients = ['100', '101', '102', '103', '104', '105', '106', '107', '108', '109',
            '111', '112', '113', '114', '115', '116', '117', '118', '119', '121',
            '122', '123', '124', '200', '201', '202', '203', '205', '207', '208',
            '209', '210', '212', '213', '214', '215', '217', '219', '220', '221',
            '222', '223', '228', '230', '231', '232', '233', '234']
dataframe = pd.DataFrame()
for pts in patients:
    file = data + pts
    annotation = wfdb.rdann(file, 'atr')
    sym = annotation.symbol
    values, counts = np.unique(sym, return_counts=True)
    df_sub = pd.DataFrame({'symbol': values, 'Counts': counts, 'Patient Number': [pts] * len(counts)})
    dataframe = pd.concat([dataframe, df_sub], axis=0)
ax = sns.countplot(dataframe.symbol)
code
88099842/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import wfdb

data = '../input/mit-bih-arrhythmia-database/'
patients = ['100', '101', '102', '103', '104', '105', '106', '107', '108', '109',
            '111', '112', '113', '114', '115', '116', '117', '118', '119', '121',
            '122', '123', '124', '200', '201', '202', '203', '205', '207', '208',
            '209', '210', '212', '213', '214', '215', '217', '219', '220', '221',
            '222', '223', '228', '230', '231', '232', '233', '234']
dataframe = pd.DataFrame()
for pts in patients:
    file = data + pts
    annotation = wfdb.rdann(file, 'atr')
    sym = annotation.symbol
    values, counts = np.unique(sym, return_counts=True)
    df_sub = pd.DataFrame({'symbol': values, 'Counts': counts, 'Patient Number': [pts] * len(counts)})
    dataframe = pd.concat([dataframe, df_sub], axis=0)
ax = sns.countplot(dataframe.symbol)
dataframe
code
88099842/cell_3
[ "text_html_output_1.png" ]
!pip install wfdb
code
88099842/cell_5
[ "text_plain_output_1.png" ]
!pip install matplotlib==3.1.3
code
1003427/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/Users/apple/Desktop/Data science/datasets/train.csv')
test_data = pd.read_csv('/Users/apple/Desktop/Data science/datasets/test.csv')
gender_submission = pd.read_csv('/Users/apple/Desktop/Data science/datasets/gender_submission.csv')
code
1003427/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/Users/apple/Desktop/Data science/datasets/train.csv')
test_data = pd.read_csv('/Users/apple/Desktop/Data science/datasets/test.csv')
gender_submission = pd.read_csv('/Users/apple/Desktop/Data science/datasets/gender_submission.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
code
122265203/cell_21
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
code
122265203/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
code
122265203/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
code
122265203/cell_25
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
score, acc = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
y_pred
code
122265203/cell_4
[ "text_plain_output_1.png" ]
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
code
122265203/cell_23
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
hist_ = pd.DataFrame(hist.history)
hist_
plt.plot(hist_['accuracy'], label='Accuracy')
plt.plot(hist_['loss'], label='Loss')
plt.title('Accuracy && LOSS')
plt.legend()
code
122265203/cell_20
[ "text_html_output_1.png" ]
from tensorflow import keras
import tensorflow as tf

shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True, show_dtype=True, dpi=120)
code
122265203/cell_6
[ "text_html_output_1.png" ]
import cv2
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
code
122265203/cell_26
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
hist_ = pd.DataFrame(hist.history)
hist_
score, acc = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
y_pred
pred_Name = []
pred_number = []
for row in y_pred:
    N = np.argmax(row)
    pred_Name.append(get_Name(N))
    pred_number.append(N)
pd.DataFrame(pred_Name, columns=['pred Names'])
code
122265203/cell_11
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
plt.figure(figsize=(50, 50))
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.subplot(10, 10, n + 1)
    plt.imshow(train_image[i])
    plt.axis('off')
    plt.title(label[i], fontsize=25)
code
122265203/cell_19
[ "text_html_output_1.png" ]
from tensorflow import keras
import tensorflow as tf

shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
code
122265203/cell_7
[ "text_html_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
plt.figure(figsize=(5, 5))
sns.countplot(x=size_number, hue=files)
code
122265203/cell_28
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
hist_ = pd.DataFrame(hist.history)
hist_
score, acc = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
y_pred
pred_Name = []
pred_number = []
for row in y_pred:
    N = np.argmax(row)
    pred_Name.append(get_Name(N))
    pred_number.append(N)
pd.DataFrame(pred_Name, columns=['pred Names'])
plt.figure(figsize=(40, 60))
n = 1
for i in range(100):
    plt.subplot(20, 5, n)
    plt.imshow(X_test[i])
    plt.axis('off')
    ti = get_Name(y_test[i]) + ' predict ' + pred_Name[i]
    plt.title(ti, fontsize=25)
    plt.legend()
    n += 1
code
122265203/cell_8
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
plt.figure(figsize=(10, 10))
plt.pie(x=size_number, labels=files, autopct='%1.1f%%')
code
122265203/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
print('X_train shape is ', X_train.shape)
print('X_test shape is ', X_test.shape)
print('y_train shape is ', y_train.shape)
print('y_test shape is ', y_test.shape)
code
122265203/cell_24
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
score, acc = model.evaluate(X_test, y_test)
print('Test Loss =', score)
print('Test Accuracy =', acc)
code
122265203/cell_22
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
hist_ = pd.DataFrame(hist.history)
hist_
code
122265203/cell_10
[ "text_html_output_1.png" ]
import cv2
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
code
122265203/cell_27
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
for n, i in enumerate(np.random.randint(0, len(train_image), 100)):
    plt.axis('off')
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code

def get_Name(N):
    for x, y in code.items():
        if y == N:
            return x

label2 = []
for i in label:
    label2.append(code[i])
label2 = np.array(label2)
pd.DataFrame(label2)
train_image = np.array(train_image)
X_train, X_test, y_train, y_test = train_test_split(train_image, label2, test_size=0.1, random_state=44, shuffle=True)
shape = (100, 100, 3)
num_class = 5
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=shape))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D((3, 3)))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(num_class, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=5)
hist_ = pd.DataFrame(hist.history)
hist_
score, acc = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
y_pred
pred_Name = []
pred_number = []
for row in y_pred:
    N = np.argmax(row)
    pred_Name.append(get_Name(N))
    pred_number.append(N)
pd.DataFrame(pred_Name, columns=['pred Names'])
pd.DataFrame(pred_number, columns=['pred Number'])
code
122265203/cell_12
[ "text_html_output_1.png" ]
import cv2
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
size_number = []
size = []
for file in files:
    path = os.path.join(train, file)
    size_number.append(len(os.listdir(path)))
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        size.append(image.shape)
pd.DataFrame(size_number, columns=['size'], index=files)
pd.DataFrame(pd.Series(size).value_counts(), columns=['Number'])
train_image = []
label = []
for file in files:
    path = os.path.join(train, file)
    for img in os.listdir(path):
        image = cv2.imread(os.path.join(path, img))
        image = cv2.resize(image, (100, 100))
        train_image.append(image)
        label.append(file)
pd.DataFrame(label, columns=['label'])
code = {}
label_uniqe = list(pd.unique(label))
for i in range(5):
    code[label_uniqe[i]] = i
code
code
122265203/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import pandas as pd

train = '/kaggle/input/rice-image-dataset/Rice_Image_Dataset'
file_names = os.listdir(train)
pd.DataFrame(file_names, columns=['Names'])
files = []
for file in file_names:
    if file == 'Rice_Citation_Request.txt':
        continue
    files.append(file)
files
code
89142662/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
data = data.drop_duplicates()
data.duplicated().sum()
data.isna().sum()
code
89142662/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
data.head(20)
code
89142662/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data['T (degC)'].plot(figsize=(15, 10))
code
89142662/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
code
89142662/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
data = data.drop_duplicates()
data.duplicated().sum()
code
89142662/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89142662/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
code
89142662/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
code
89142662/cell_15
[ "text_plain_output_1.png" ]
(len(data_hourly), data_hourly.isna().sum())
code
89142662/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
code
89142662/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
data = data.drop_duplicates()
data.duplicated().sum()
data.isna().sum()
data.index = pd.to_datetime(data['Date Time'], format='%d.%m.%Y %H:%M:%S')
data
code
89142662/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.isna().sum()
data.duplicated().sum()
data.duplicated(subset=['Date Time']).sum()
data = data.drop_duplicates()
data.duplicated().sum()
data.head(20)
code
89142662/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/jena-climate/jena_climate_2009_2016.csv')
data
data.info()
code
90154574/cell_13
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
print('Intercept :', regr.intercept_)
print('Coefficient :', regr.coef_)
print(x)
code
90154574/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
plt.figure(figsize=(15, 10))
sns.heatmap(corr, vmax=0.8, annot=True, fmt='.2f')
plt.show()
code
90154574/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/housedata/output.csv')
df.head()
code
90154574/cell_20
[ "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
f = function(x, y)
plt.scatter(x, f)
plt.plot(x, f)
plt.xlabel('X')
plt.ylabel('f(X)')
code
90154574/cell_26
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
predicted = regr.predict([[2, 3, 1680, 2180]])
predicted = regr.predict([[2.75, 4.0, 1400, 2490]])
predicted = regr.predict([[8.0, 7.0, 9410, 13540]])
predicted = regr.predict([[1.0, 0.0, 0.0, 0.0]])
predicted = regr.predict([[0.0, 1.0, 0.0, 0.0]])
predicted = regr.predict([[1.0, 0.0, 1.0, 0.0]])
predicted = regr.predict([[0.0, 0.0, 0.0, 1.0]])
df.isnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
f = function(x, y)
x = df[['bedrooms']]
y = df['price']

def find_theta(X, y):
    m = X.shape[0]
    X = np.append(X, np.ones((m, 1)), axis=1)
    theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
    return theta

def predict(X):
    X = np.append(X, np.ones((x.shape[0], 1)), axis=1)
    preds = np.dot(X, theta)
    return preds

theta = find_theta(x, y)
print(theta)
preds = predict(x)
fig = plt.figure()
plt.plot(x, y, 'b.')
plt.plot(x, preds, 'c-')
plt.xlabel('X - Input')
plt.ylabel('y - target/true')
x = df[['bedrooms']]
y = df['price']
regr2 = linear_model.LinearRegression()
regr2.fit(x.values, y)
arr = []
index = []
for i in range(0, 9, 1):
    predicted = regr2.predict([[i]])
    arr.append(predicted[0])
    index.append(i)
fig = plt.figure()
plt.plot(x, y, 'b.')
plt.plot(index, arr, 'c-')
code
90154574/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/housedata/output.csv')
df.hist(figsize=(20, 20))
plt.show()
code
90154574/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
df.isnull().sum()
code
90154574/cell_24
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
f = function(x, y)
x = df[['bedrooms']]
y = df['price']

def find_theta(X, y):
    m = X.shape[0]
    X = np.append(X, np.ones((m, 1)), axis=1)
    theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
    return theta

def predict(X):
    X = np.append(X, np.ones((x.shape[0], 1)), axis=1)
    preds = np.dot(X, theta)
    return preds

theta = find_theta(x, y)
print(theta)
preds = predict(x)
fig = plt.figure()
plt.plot(x, y, 'b.')
plt.plot(x, preds, 'c-')
plt.xlabel('X - Input')
plt.ylabel('y - target/true')
code
90154574/cell_14
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
predicted = regr.predict([[2, 3, 1680, 2180]])
print('predicted with [2.0,3.0,1680,2180] so, price :', predicted)
predicted = regr.predict([[2.75, 4.0, 1400, 2490]])
print('predicted with [2.75,4.0,1400,2490] so, price :', predicted)
predicted = regr.predict([[8.0, 7.0, 9410, 13540]])
print('predicted with [8.0,7.0,9410,13540] so, price :', predicted)
predicted = regr.predict([[1.0, 0.0, 0.0, 0.0]])
print('predicted with bathrooms in 1 unit so, price :', predicted)
predicted = regr.predict([[0.0, 1.0, 0.0, 0.0]])
print('predicted with bedrooms in 1 unit so, price :', predicted)
predicted = regr.predict([[1.0, 0.0, 1.0, 0.0]])
print('predicted with sqft_above in 1 unit so, price :', predicted)
predicted = regr.predict([[0.0, 0.0, 0.0, 1.0]])
print('predicted with sqft_living in 1 unit so, price :', predicted)
code
90154574/cell_22
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housedata/output.csv')
corr = df.corr()
x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['bathrooms', 'bedrooms', 'sqft_above', 'sqft_living']]
y = df['price']
f = function(x, y)
x = df[['bedrooms']]
y = df['price']
plt.plot(x, y, 'r.')
code
90154574/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/housedata/output.csv')
df.describe()
code
18116806/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
scipy.stats.chisquare(hosp.age)
scipy.stats.pearsonr(hosp.age, hosp.admit_type)
scipy.stats.skew(hosp.age, axis=0, bias=True, nan_policy='propagate')
np.histogram(hosp.age, bins=40, range=None, weights=None, density=None)
code
18116806/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.figure(figsize=(20, 10))
sns.countplot(x='age', data=hosp, palette='bwr')
plt.title('Distribution of Age')
plt.xticks(rotation=90)
plt.show()
code
18116806/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.describe(hosp.age)
code
18116806/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
code
18116806/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
scipy.stats.chisquare(hosp.age)
scipy.stats.pearsonr(hosp.age, hosp.admit_type)
scipy.stats.skew(hosp.age, axis=0, bias=True, nan_policy='propagate')
code
18116806/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp.head(5)
code
18116806/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.info()
code
18116806/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from scipy import stats

print(os.listdir('../input'))
code
18116806/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp['AdmitDiagnosis'].unique().shape
code
18116806/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
scipy.stats.chisquare(hosp.age)
scipy.stats.pearsonr(hosp.age, hosp.admit_type)
code
18116806/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
hosp['age'].unique().shape
code
18116806/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
scipy.stats.chisquare(hosp.age)
code
18116806/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.describe()
code
18116806/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats
import seaborn as sns

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
plt.xticks(rotation=90)
plt.figure(figsize=(20, 20))
sns.heatmap(cbar=False, annot=True, data=hosp.corr() * 100, cmap='coolwarm')
plt.title('% Correlation Matrix')
plt.show()
code
18116806/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats

hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
code
18116806/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as scipy
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
scipy.stats.kurtosis(hosp.age)
hosp.isnull().sum()
code
18116806/cell_5
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hosp = pd.read_csv('../input/mimic3d.csv')
hosp.dtypes
hosp.shape
code
105210311/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
sns.heatmap(data.isnull())
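# Hedged follow-up (not in the original cell): verify the random-sample
# imputation left no missing values; this holds because the filler Series
# shares the frame's default RangeIndex, so fillna aligns on every row.
assert data['Arrival Delay'].isnull().sum() == 0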
code
105210311/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
code
105210311/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
data.shape
code
105210311/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
code
105210311/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.head()
code
105210311/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
data.shape
data = data[data['Flight Distance'] < 4000]
data.shape
code
105210311/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
data.shape
sns.set(rc={'figure.figsize': (8, 4)})
sns.scatterplot(x='Flight Distance', y='Satisfaction', data=data)
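# Hedged alternative (not in the original cell): Satisfaction is a categorical
# label, so a box plot of flight distance per satisfaction level may read more
# clearly than the scatter plot above.
sns.boxplot(x='Satisfaction', y='Flight Distance', data=data)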
code
105210311/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
passengers.describe()
code
105210311/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
sns.displot(data=data, x='Arrival Delay', kind='kde')
code
105210311/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.info()
code
105210311/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
data.shape
sns.set(rc={'figure.figsize': (8, 4)})
data = data[data['Flight Distance'] < 4000]
sns.set(rc={'figure.figsize': (8, 4)})
sns.scatterplot(x='Flight Distance', y='Satisfaction', data=data)
code
105210311/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
code
105210311/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
code
105210311/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
sns.displot(data=data, x='Arrival Delay', kind='kde')
code
105210311/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
data = passengers.copy()
data.drop('ID', axis=1, inplace=True)
data.isnull().sum()
fill_list = data['Arrival Delay'].dropna()
data['Arrival Delay'] = data['Arrival Delay'].fillna(pd.Series(np.random.choice(fill_list, size=len(data.index))))
data.isnull().sum()
data.plot(kind='box', subplots=True, figsize=(18, 15), layout=(4, 5))
plt.show()
code
105210311/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
code
105210311/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
passengers.shape
nulls = passengers.isnull().sum()
nulls
passengers.duplicated().sum()
plt.figure(figsize=(16, 10))
sns.heatmap(passengers.corr(), cbar=True, annot=True)
code
105210311/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
passengers = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv', sep=',', encoding='utf-8')
passengers.columns
code
72081346/cell_13
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')  # the original cell read sample_submission.csv here, apparently a swapped path
test = pd.read_csv('../input/30-days-of-ml/test.csv')  # the original cell read train.csv here; test.csv is assumed to be the intended file
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.shape
target = train.target
train_features = train.drop(columns=['target'])  # added: keep the label out of the feature matrix
X_trainfull, X_validfull, y_train, y_valid = train_test_split(train_features, target, train_size=0.8, test_size=0.2, random_state=42)
categorical_cols = [cname for cname in X_trainfull.columns if X_trainfull[cname].dtype == 'object']
numerical_cols = [cname for cname in X_trainfull.columns if X_trainfull[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_trainfull[my_cols].copy()
X_valid = X_validfull[my_cols].copy()
X_test = test[my_cols].copy()
numeric = SimpleImputer(strategy='constant')
cat = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numeric, numerical_cols), ('cat', cat, categorical_cols)])
model = XGBRegressor()
clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
clf.fit(X_train, y_train)
predictions = clf.predict(X_valid)
from sklearn.model_selection import cross_val_score
scores = -1 * cross_val_score(clf, train_features, target, cv=5, scoring='neg_mean_absolute_error')  # feature frame used here so the label is not leaked
print('MAE scores:\n', scores)
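# Hedged follow-up (not in the original cell): summarize the five fold scores.
print('Average MAE:', scores.mean())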
code
72081346/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')  # the original cell read sample_submission.csv here, apparently a swapped path
test = pd.read_csv('../input/30-days-of-ml/test.csv')  # the original cell read train.csv here; test.csv is assumed to be the intended file
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.shape
code
72081346/cell_11
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split  # added: needed for the split reproduced below
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')  # the original cell read sample_submission.csv here, apparently a swapped path
test = pd.read_csv('../input/30-days-of-ml/test.csv')  # the original cell read train.csv here; test.csv is assumed to be the intended file
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# The original cell used X_trainfull and y_train without defining them; the split
# below is reproduced from the companion cell (cell_13) so this cell runs standalone.
target = train.target
train_features = train.drop(columns=['target'])
X_trainfull, X_validfull, y_train, y_valid = train_test_split(train_features, target, train_size=0.8, test_size=0.2, random_state=42)
categorical_cols = [cname for cname in X_trainfull.columns if X_trainfull[cname].dtype == 'object']
numerical_cols = [cname for cname in X_trainfull.columns if X_trainfull[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_trainfull[my_cols].copy()
X_valid = X_validfull[my_cols].copy()
X_test = test[my_cols].copy()
numeric = SimpleImputer(strategy='constant')
cat = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numeric, numerical_cols), ('cat', cat, categorical_cols)])
model = XGBRegressor()
clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
clf.fit(X_train, y_train)
code
72081346/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72081346/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')  # the original cell read sample_submission.csv here, apparently a swapped path
test = pd.read_csv('../input/30-days-of-ml/test.csv')  # the original cell read train.csv here; test.csv is assumed to be the intended file
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.shape
target = train.target
X_trainfull, X_validfull, y_train, y_valid = train_test_split(train, target, train_size=0.8, test_size=0.2, random_state=42)
target
code
72081346/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')  # the original cell read sample_submission.csv here, apparently a swapped path
test = pd.read_csv('../input/30-days-of-ml/test.csv')  # the original cell read train.csv here; test.csv is assumed to be the intended file
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train
code
104123689/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os
"\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n"
code
104123689/cell_7
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import json
import matplotlib.pyplot as plt  # original imported matplotlib.pylab; pyplot is the idiomatic interface
import rasterio
import rasterio.plot  # added: rasterio.plot is a submodule and is not loaded by `import rasterio` alone
image = '/kaggle/input/hubmap-organ-segmentation/train_images/15329.tiff'
tiff = rasterio.open(image)
img = tiff.read()
boundary = '/kaggle/input/hubmap-organ-segmentation/train_annotations/15329.json'  # renamed from the original `boundry` typo
with open(boundary) as json_file:
    data = json.load(json_file)
rasterio.plot.show(tiff, title='15329')
fig, ax = plt.subplots()  # original read `ig, ax`, a typo
ax.imshow(img[2, :, :])
for cord in data[0]:
    plt.scatter(cord[0], cord[1], color='red', alpha=0.05)
plt.show()
print('look at a sample image and mark it with the annotation to find the area of interest marked in red')
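# Hedged alternative (not in the original cell): drawing the annotation polygon
# as a single array is much faster than one scatter call per vertex; this assumes
# data[0] is a list of [x, y] coordinate pairs, as the loop above already does.
import numpy as np
coords = np.array(data[0])
fig2, ax2 = plt.subplots()
ax2.imshow(img[2, :, :])
ax2.plot(coords[:, 0], coords[:, 1], color='red', linewidth=1)
plt.show()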
code
104123689/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import rasterio
image = '/kaggle/input/hubmap-organ-segmentation/train_images/15329.tiff'
tiff = rasterio.open(image)
img = tiff.read()
code
128048094/cell_42
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.model_selection import train_test_split  # added: needed for the split reproduced below
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
# The original cell used X_train and y_train without defining them; the split below
# is reproduced from the companion cell (cell_33) so this cell runs standalone. The
# remaining object-typed columns are assumed to be encoded in cells not shown here.
data_y = data_encoded[['Loan_Status']]
data_x = data_encoded.drop(columns=['Loan_Status', 'Loan_ID'])
X_train, X_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.2, random_state=1)
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
print(classification_report(y_test, model_logistic.predict(X_test)))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
ConfusionMatrixDisplay(cm_logistic).plot()
plt.show()
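# Hedged follow-up (not in the original cell): a quick side-by-side accuracy
# comparison of the four fitted classifiers on the held-out split.
from sklearn.metrics import accuracy_score
for name, model in [('knn', model_knn), ('svm', model_svm), ('tree', model_tree), ('logistic', model_logistic)]:
    print(name, accuracy_score(y_test, model.predict(X_test)))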
code
128048094/cell_33
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
from sklearn.model_selection import train_test_split
data_y = data_encoded[['Loan_Status']]
data_x = data_encoded.drop(columns=['Loan_Status', 'Loan_ID'])
X_train, X_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.2, random_state=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_encoded = data_encoded.drop(columns=['Loan_ID'])
cols_to_scale = [col for col in data_encoded.columns if col != 'Loan_Status']
data_std = data_encoded[cols_to_scale].copy()
data_std[cols_to_scale] = scaler.fit_transform(data_std[cols_to_scale])
data_std['Loan_Status'] = data_encoded['Loan_Status']
data_std.head(10)
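# Hedged note (not in the original cell): fitting the scaler on the full frame
# lets test-set statistics leak into training. A leakage-free sketch, assuming
# all remaining feature columns are numeric (as the cell above already assumes):
X_train_std = scaler.fit_transform(X_train[cols_to_scale])
X_test_std = scaler.transform(X_test[cols_to_scale])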
code