Dataset schema (per-row fields, as rendered by the dataset viewer):
  path              string, length 13-17 (notebook id / cell id, e.g. "88094115/cell_12")
  screenshot_names  sequence of strings, length 1-873 (rendered cell outputs)
  code              string, length 0-40.4k (the cell's source)
  cell_type         string, 1 distinct value ("code")
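A minimal sketch of loading a dump with this schema and inspecting one row, assuming the rows are published as a Hugging Face dataset (the `datasets` library and the repo id below are assumptions, not taken from this dump):

from datasets import load_dataset

ds = load_dataset("user/kaggle-notebook-cells", split="train")  # hypothetical repo id
row = ds[0]
print(row["path"])              # e.g. "88094115/cell_12"
print(row["screenshot_names"])  # list of rendered-output image names
print(row["code"][:200])        # first part of the cell's source code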
88094115/cell_12
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
plt.scatter(x, y)
plt.title('Linear Regression using Ordinary Least Square Method')
plt.plot(x, y_pred, color='red', label='Best Fit Line')
plt.legend()
plt.show()
code
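The cell above fits a single-feature OLS model; the same coefficients fall out of the normal equation directly. A minimal sketch of that check, assuming the x, y, and model names from the cell:

import numpy as np

# Normal equation: beta = (X'X)^{-1} X'y, with a bias column prepended.
X = np.hstack([np.ones((len(x), 1)), x.to_numpy()])
beta = np.linalg.solve(X.T @ X, X.T @ y)
print(beta)                           # [intercept, slope]
print(model.intercept_, model.coef_)  # should agree with sklearn's fit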
88094115/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
df.info()
code
2019997/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def preprocess_data(test_data=False):

    def encode_one_categorical_feature(column):
        le = LabelEncoder()
        ohe = OneHotEncoder(sparse=False)  # sparse_output=False in sklearn >= 1.2
        num_encoded = le.fit_transform(column.fillna('unk'))
        return ohe.fit_transform(num_encoded.reshape(-1, 1))

    data = pd.read_csv('data/train.csv')
    target = ['SalePrice']
    features = data.drop(['Id'] + target, axis=1).columns
    dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
    dataset_types.reset_index(inplace=True)
    numeric_features = dataset_types.rename(columns={'index': 'feature'}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
    num_data = data[numeric_features]
    num_features = num_data.fillna(num_data.mean()).values
    scaler = StandardScaler()
    num_features_scaled = scaler.fit_transform(num_features)
    categorical_features = dataset_types.rename(columns={'index': 'feature'}).feature[dataset_types.datatype == 'object']
    cat_data = data[categorical_features]
    cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
    X = np.hstack((num_features_scaled, cat_features))
    if test_data:
        return X
    y = data[target].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
    return (X_train, X_test, y_train, y_test)

def plot_history(history):
    pass

def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 50
    BATCH_SIZE = 128
    inputs = Input(shape=(304,))
    x = Dropout(0.2)(inputs)
    x = Dense(256)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    for _ in range(5):  # five more identical 256-unit blocks with 0.4 dropout
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
    predictions = Dense(1)(x)
    model = Model(inputs=[inputs], outputs=[predictions])
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
    score = model.evaluate(X_test, y_test, verbose=0)
    return (history, model)

# Note: preprocess_data() always reads data/train.csv, even with test_data=True.
test_data = preprocess_data(test_data=True)
code
2019997/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def preprocess_data(test_data=False):

    def encode_one_categorical_feature(column):
        le = LabelEncoder()
        ohe = OneHotEncoder(sparse=False)  # sparse_output=False in sklearn >= 1.2
        num_encoded = le.fit_transform(column.fillna('unk'))
        return ohe.fit_transform(num_encoded.reshape(-1, 1))

    data = pd.read_csv('data/train.csv')
    target = ['SalePrice']
    features = data.drop(['Id'] + target, axis=1).columns
    dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
    dataset_types.reset_index(inplace=True)
    numeric_features = dataset_types.rename(columns={'index': 'feature'}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
    num_data = data[numeric_features]
    num_features = num_data.fillna(num_data.mean()).values
    scaler = StandardScaler()
    num_features_scaled = scaler.fit_transform(num_features)
    categorical_features = dataset_types.rename(columns={'index': 'feature'}).feature[dataset_types.datatype == 'object']
    cat_data = data[categorical_features]
    cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
    X = np.hstack((num_features_scaled, cat_features))
    if test_data:
        return X
    y = data[target].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
    return (X_train, X_test, y_train, y_test)

def plot_history(history):
    pass

def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 50
    BATCH_SIZE = 128
    inputs = Input(shape=(304,))
    x = Dropout(0.2)(inputs)
    x = Dense(256)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    for _ in range(5):  # five more identical 256-unit blocks with 0.4 dropout
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
    predictions = Dense(1)(x)
    model = Model(inputs=[inputs], outputs=[predictions])
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
    score = model.evaluate(X_test, y_test, verbose=0)
    return (history, model)

X_train, X_test, y_train, y_test = preprocess_data()
code
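The LabelEncoder-plus-OneHotEncoder chain above predates OneHotEncoder accepting string categories directly. A minimal modern sketch of the same per-column encoding, assuming scikit-learn >= 1.2 (where the sparse parameter became sparse_output):

import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

def encode_one_categorical_feature(column: pd.Series) -> np.ndarray:
    # OneHotEncoder handles string categories directly; no LabelEncoder step needed.
    ohe = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
    return ohe.fit_transform(column.fillna('unk').to_numpy().reshape(-1, 1))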
2019997/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def preprocess_data(test_data=False):

    def encode_one_categorical_feature(column):
        le = LabelEncoder()
        ohe = OneHotEncoder(sparse=False)  # sparse_output=False in sklearn >= 1.2
        num_encoded = le.fit_transform(column.fillna('unk'))
        return ohe.fit_transform(num_encoded.reshape(-1, 1))

    data = pd.read_csv('data/train.csv')
    target = ['SalePrice']
    features = data.drop(['Id'] + target, axis=1).columns
    dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
    dataset_types.reset_index(inplace=True)
    numeric_features = dataset_types.rename(columns={'index': 'feature'}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
    num_data = data[numeric_features]
    num_features = num_data.fillna(num_data.mean()).values
    scaler = StandardScaler()
    num_features_scaled = scaler.fit_transform(num_features)
    categorical_features = dataset_types.rename(columns={'index': 'feature'}).feature[dataset_types.datatype == 'object']
    cat_data = data[categorical_features]
    cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
    X = np.hstack((num_features_scaled, cat_features))
    if test_data:
        return X
    y = data[target].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
    return (X_train, X_test, y_train, y_test)

def plot_history(history):
    pass

def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 50
    BATCH_SIZE = 128
    inputs = Input(shape=(304,))
    x = Dropout(0.2)(inputs)
    x = Dense(256)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    for _ in range(5):  # five more identical 256-unit blocks with 0.4 dropout
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
    predictions = Dense(1)(x)
    model = Model(inputs=[inputs], outputs=[predictions])
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
    score = model.evaluate(X_test, y_test, verbose=0)
    return (history, model)

# keras_model returns (history, model); the original unpacked them in the wrong order.
history, model = keras_model(X_train, X_test, y_train, y_test)
code
2019997/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
code
2019997/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def preprocess_data(test_data=False):

    def encode_one_categorical_feature(column):
        le = LabelEncoder()
        ohe = OneHotEncoder(sparse=False)  # sparse_output=False in sklearn >= 1.2
        num_encoded = le.fit_transform(column.fillna('unk'))
        return ohe.fit_transform(num_encoded.reshape(-1, 1))

    data = pd.read_csv('data/train.csv')
    target = ['SalePrice']
    features = data.drop(['Id'] + target, axis=1).columns
    dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
    dataset_types.reset_index(inplace=True)
    numeric_features = dataset_types.rename(columns={'index': 'feature'}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
    num_data = data[numeric_features]
    num_features = num_data.fillna(num_data.mean()).values
    scaler = StandardScaler()
    num_features_scaled = scaler.fit_transform(num_features)
    categorical_features = dataset_types.rename(columns={'index': 'feature'}).feature[dataset_types.datatype == 'object']
    cat_data = data[categorical_features]
    cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
    X = np.hstack((num_features_scaled, cat_features))
    if test_data:
        return X
    y = data[target].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
    return (X_train, X_test, y_train, y_test)

def plot_history(history):
    pass

def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 50
    BATCH_SIZE = 128
    inputs = Input(shape=(304,))
    x = Dropout(0.2)(inputs)
    x = Dense(256)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    for _ in range(5):  # five more identical 256-unit blocks with 0.4 dropout
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
    predictions = Dense(1)(x)
    model = Model(inputs=[inputs], outputs=[predictions])
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
    score = model.evaluate(X_test, y_test, verbose=0)
    return (history, model)

predicted = model.predict(X_test)  # uses the model trained in cell_6; ".model" was a typo
code
2019997/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def preprocess_data(test_data=False):

    def encode_one_categorical_feature(column):
        le = LabelEncoder()
        ohe = OneHotEncoder(sparse=False)  # sparse_output=False in sklearn >= 1.2
        num_encoded = le.fit_transform(column.fillna('unk'))
        return ohe.fit_transform(num_encoded.reshape(-1, 1))

    data = pd.read_csv('data/train.csv')
    target = ['SalePrice']
    features = data.drop(['Id'] + target, axis=1).columns
    dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
    dataset_types.reset_index(inplace=True)
    numeric_features = dataset_types.rename(columns={'index': 'feature'}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
    num_data = data[numeric_features]
    num_features = num_data.fillna(num_data.mean()).values
    scaler = StandardScaler()
    num_features_scaled = scaler.fit_transform(num_features)
    categorical_features = dataset_types.rename(columns={'index': 'feature'}).feature[dataset_types.datatype == 'object']
    cat_data = data[categorical_features]
    cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
    X = np.hstack((num_features_scaled, cat_features))
    if test_data:
        return X
    y = data[target].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
    return (X_train, X_test, y_train, y_test)

def plot_history(history):
    pass

def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 50
    BATCH_SIZE = 128
    inputs = Input(shape=(304,))
    x = Dropout(0.2)(inputs)
    x = Dense(256)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    for _ in range(5):  # five more identical 256-unit blocks with 0.4 dropout
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
    predictions = Dense(1)(x)
    model = Model(inputs=[inputs], outputs=[predictions])
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
    score = model.evaluate(X_test, y_test, verbose=0)
    return (history, model)

predicted = model.predict(X_test)  # ".model" was a typo; model is the Keras Model itself
plt.plot(y_test - predicted)
code
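The residual plot above shows the error pattern; a single summary number is also useful. A minimal sketch, assuming y_test and predicted from that cell:

import numpy as np

# Root-mean-square error of the house-price predictions.
rmse = float(np.sqrt(np.mean((y_test - predicted) ** 2)))
print(f'RMSE: {rmse:,.0f}')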
106198216/cell_21
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import format_number

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns

result = df_wmt.describe()
result.select(result['summary'],
              format_number(result['Open'].cast('float'), 2).alias('Open'),
              format_number(result['High'].cast('float'), 2).alias('High'),
              format_number(result['Low'].cast('float'), 2).alias('Low'),
              format_number(result['Close'].cast('float'), 2).alias('Close'),
              result['Volume'].cast('int').alias('Volume')).show()
df_wmt.orderBy(df_wmt['High'].desc()).show()
code
106198216/cell_13
[ "text_html_output_1.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns
df_wmt.describe().show()
code
106198216/cell_9
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns
code
106198216/cell_25
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import format_number, mean

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns

result = df_wmt.describe()
result.select(result['summary'],
              format_number(result['Open'].cast('float'), 2).alias('Open'),
              format_number(result['High'].cast('float'), 2).alias('High'),
              format_number(result['Low'].cast('float'), 2).alias('Low'),
              format_number(result['Close'].cast('float'), 2).alias('Close'),
              result['Volume'].cast('int').alias('Volume')).show()
df_wmt.select(mean('Close')).show()
code
106198216/cell_23
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import format_number

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns

result = df_wmt.describe()
result.select(result['summary'],
              format_number(result['Open'].cast('float'), 2).alias('Open'),
              format_number(result['High'].cast('float'), 2).alias('High'),
              format_number(result['Low'].cast('float'), 2).alias('Low'),
              format_number(result['Close'].cast('float'), 2).alias('Close'),
              result['Volume'].cast('int').alias('Volume')).show()
df_wmt.orderBy(df_wmt['High'].desc()).select('Date').show(1)
code
106198216/cell_2
[ "text_plain_output_1.png" ]
pip install pyspark
code
106198216/cell_28
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import format_number, mean, max, min  # note: max/min shadow the built-ins

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns

result = df_wmt.describe()
result.select(result['summary'],
              format_number(result['Open'].cast('float'), 2).alias('Open'),
              format_number(result['High'].cast('float'), 2).alias('High'),
              format_number(result['Low'].cast('float'), 2).alias('Low'),
              format_number(result['Close'].cast('float'), 2).alias('Close'),
              result['Volume'].cast('int').alias('Volume')).show()
df_wmt.select(max('Volume').alias('MAX Volume'), min('Volume').alias('MIN Volume')).show()
code
106198216/cell_8
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.show(1, vertical=True)
code
106198216/cell_15
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns
df_wmt.describe().printSchema()
code
106198216/cell_3
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
code
106198216/cell_17
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import format_number

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns

result = df_wmt.describe()
result.select(result['summary'],
              format_number(result['Open'].cast('float'), 2).alias('Open'),
              format_number(result['High'].cast('float'), 2).alias('High'),
              format_number(result['Low'].cast('float'), 2).alias('Low'),
              format_number(result['Close'].cast('float'), 2).alias('Close'),
              result['Volume'].cast('int').alias('Volume')).show()
code
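describe() on a Spark DataFrame returns every statistic as a string, which is why the cell above casts before formatting. A compact variant of the same idea, assuming the df_wmt frame from the earlier cells:

from pyspark.sql.functions import format_number

result = df_wmt.describe()
price_cols = ['Open', 'High', 'Low', 'Close']
result.select(result['summary'],
              *[format_number(result[c].cast('float'), 2).alias(c) for c in price_cols],
              result['Volume'].cast('int').alias('Volume')).show()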
106198216/cell_10
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns
df_wmt.printSchema()
code
106198216/cell_5
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.show()
code
16152737/cell_21
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

X_train_pca_df = pd.DataFrame(X_train_pca[:, 0:15])
X_train_pca_df.shape
code
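Rather than hard-coding the first 15 components as above, the cut-off can be tied to explained variance. A minimal sketch, assuming the scaled X_train from the same cell:

from sklearn.decomposition import PCA

# A float n_components keeps as many components as needed to explain 95% of the variance.
pca = PCA(n_components=0.95).fit(X_train)
X_train_pca = pca.transform(X_train)
print(pca.n_components_, X_train_pca.shape)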
16152737/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
print('Shape of Training data: ', X_train.shape,
      '\nShape of Testing data: ', X_test.shape,
      '\nShape of Training Labels: ', y_train.shape)
code
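One caveat in the cell above: the scaler is re-fit on the test features, so train and test end up standardized with different statistics. A minimal sketch of the usual fix, assuming the same train_added, test_added, and features names:

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])  # fit on train only
X_test = scaler.transform(test_added[features])        # reuse the train statistics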
16152737/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
print('Eliminate features')
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
code
16152737/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
print('Missing values in train: ', train.isnull().sum().sum())
code
16152737/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout
from keras.models import Sequential
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

X_train_pca_df = pd.DataFrame(X_train_pca[:, 0:15])
X_train_pca_df.shape
y_train_nn = y_train.values.reshape(1, -1)
model = Sequential()
model.add(Dense(128, input_dim=15, kernel_initializer='uniform', activation='relu'))
model.add(Dense(64, kernel_initializer='uniform', activation='relu'))
model.add(Dense(32, kernel_initializer='uniform', activation='elu'))
model.add(Dense(16, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train_pca_df, y_train_nn.T, epochs=10, batch_size=32)
code
16152737/cell_20
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import to_categorical
from keras.datasets import mnist
from keras.utils.vis_utils import model_to_dot
from IPython.display import SVG
from keras.utils import np_utils
code
16152737/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)
print('Extra features in train: ', uncommon_features)
code
16152737/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
print('train.shape:{} test.shape:{}'.format(train.shape, test.shape))
code
16152737/cell_19
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

X_train_pca_df = pd.DataFrame(X_train_pca[:, 0:15])
X_test_pca_df = pd.DataFrame(X_test_pca[:, 0:15])
X_test_pca_df.head()
code
16152737/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

print(os.listdir('../input'))
code
16152737/cell_18
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

X_train_pca_df = pd.DataFrame(X_train_pca[:, 0:15])
X_train_pca_df.head()
code
16152737/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
print('Total Number of Features: ', train_added.shape[1])
code
16152737/cell_16
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

# Pass the scaled features, not the PC scores: pca_summary() applies pca.transform() itself.
summary = pca_summary(pca, X_train)
code
16152737/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
16152737/cell_17
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
uncommon_features = []
for i in train.columns:
    if i not in test.columns:
        uncommon_features.append(i)

def add_features(data):
    df = data.copy()
    df['NEW_FD_SUMP'] = df['FlightDistance'] / (df['p0_p'] + df['p1_p'] + df['p2_p'])
    df['NEW5_lt'] = df['LifeTime'] * (df['p0_IP'] + df['p1_IP'] + df['p2_IP']) / 3
    df['p_track_Chi2Dof_MAX'] = df.loc[:, ['p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof']].max(axis=1)
    df['flight_dist_sig2'] = (df['FlightDistance'] / df['FlightDistanceError']) ** 2
    df['flight_dist_sig'] = df['FlightDistance'] / df['FlightDistanceError']
    df['NEW_IP_dira'] = df['IP'] * df['dira']
    df['p0p2_ip_ratio'] = df['IP'] / df['IP_p0p2']
    df['p1p2_ip_ratio'] = df['IP'] / df['IP_p1p2']
    df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
    df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
    df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf']].min(axis=1)
    df['NEW_iso_abc'] = df['isolationa'] * df['isolationb'] * df['isolationc']
    df['NEW_iso_def'] = df['isolationd'] * df['isolatione'] * df['isolationf']
    df['NEW_pN_IP'] = df['p0_IP'] + df['p1_IP'] + df['p2_IP']
    df['NEW_pN_p'] = df['p0_p'] + df['p1_p'] + df['p2_p']
    df['NEW_IP_pNpN'] = df['IP_p0p2'] * df['IP_p1p2']
    df['NEW_pN_IPSig'] = df['p0_IPSig'] + df['p1_IPSig'] + df['p2_IPSig']
    df['NEW_FD_LT'] = df['FlightDistance'] / df['LifeTime']
    return df

train_added = add_features(train)
test_added = add_features(test)
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits',
              'CDF1', 'CDF2', 'CDF3', 'p0_pt', 'p1_pt', 'p2_pt',
              'p0_p', 'p1_p', 'p2_p', 'p0_eta', 'p1_eta', 'p2_eta',
              'isolationa', 'isolationb', 'isolationc', 'isolationd', 'isolatione', 'isolationf',
              'p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT', 'p0_IP', 'p1_IP', 'p2_IP',
              'IP_p0p2', 'IP_p1p2', 'p0_track_Chi2Dof', 'p1_track_Chi2Dof', 'p2_track_Chi2Dof',
              'p0_IPSig', 'p1_IPSig', 'p2_IPSig', 'DOCAone', 'DOCAtwo', 'DOCAthree']
features = [f for f in train_added.columns if f not in filter_out]
scaler = StandardScaler()
X_train = scaler.fit_transform(train_added[features])
X_test = scaler.fit_transform(test_added[features])
y_train = train['signal']
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

def pca_summary(pca, standardized_data, out=True):
    names = ['PC' + str(i) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    a = list(np.std(pca.transform(standardized_data), axis=0))
    b = list(pca.explained_variance_ratio_)
    c = [np.sum(pca.explained_variance_ratio_[:i]) for i in range(1, len(pca.explained_variance_ratio_) + 1)]
    columns = pd.MultiIndex.from_tuples([('sdev', 'Standard deviation'),
                                         ('varprop', 'Proportion of Variance'),
                                         ('cumprop', 'Cumulative Proportion')])
    return pd.DataFrame(list(zip(a, b, c)), index=names, columns=columns)

def screeplot(pca, standardized_values):
    y = np.std(pca.transform(standardized_values), axis=0) ** 2
    x = np.arange(len(y)) + 1
    plt.plot(x, y, 'o-')
    plt.xticks(x, ['Comp.' + str(i) for i in x], rotation=60)
    plt.ylabel('Variance')
    plt.show()

screeplot(pca, X_train)
code
16152737/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
print('Missing values in test: ', test.isnull().sum().sum())  # the original summed train here
code
17111990/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image, display
import os

!ls ../input/
display(Image("../input/images/twitter.png"))  # display() so both images render
Image("../input/images/Trump_New_York_Times_tweet_.jpg")
code
17111990/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image
import os

!ls ../input/
Image("../input/images/history-bigdata.jpg")
code
17111990/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image
import os

!ls ../input/
Image("../input/images/threev.png")
code
17111990/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image
import os

!ls ../input/
Image("../input/images/bda-696x394.jpg")
code
17111990/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image
import os

!ls ../input/
Image("../input/images/company.jpg")
code
17111990/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from subprocess import check_output

print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
code
17111990/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from IPython.display import Image
import os

!ls ../input/
Image("../input/images/Management.png")
code
104116119/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
missing_ratio = train_df.isna().sum() / train_df.shape[0]
print(missing_ratio[missing_ratio > 0.4])
code
104116119/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
test_df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
categorical_columns = [c for c in train_df.columns if train_df[c].dtype == 'object']
categorical_columns
len(categorical_columns)
code
104116119/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_folds_df = pd.read_csv('./train_folds.csv')
train_folds_df = train_folds_df.drop(['Id'], axis=1)
train_folds_df.head(1)
code
104116119/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
test_df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
categorical_columns = [c for c in train_df.columns if train_df[c].dtype == 'object']
categorical_columns
len(categorical_columns)
numerical_columns = [col for col in train_df.columns if train_df[col].dtypes != 'object']
numerical_columns
len(numerical_columns)
code
104116119/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_df.head(2)
code
104116119/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_folds_df = pd.read_csv('./train_folds.csv')
train_folds_df.head(1)
code
33096184/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
sns.countplot(x='Survived', hue='Survived', data=df1)
code
33096184/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
code
33096184/cell_44
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc3 = df1[df1['Pclass'] == 3]
df1_pc3.shape
code
33096184/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df.head()
code
33096184/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
plt.figure(figsize=(8, 5))
sns.boxplot(x='Sex', y='Age', data=df1_pc1, palette='winter')
code
33096184/cell_39
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
df1_pc2 = df1[df1['Pclass'] == 2]
df1_pc2.shape
plt.figure(figsize=(8, 5))
sns.boxplot(x='Sex', y='Age', data=df1_pc2, palette='winter')
code
33096184/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
code
33096184/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc3 = df1[df1['Pclass'] == 3]
df1_pc3.shape
df1_pc3_male = df1_pc3[df1_pc3['Sex'] == 'male'].copy()  # .copy() avoids SettingWithCopyWarning
df1_pc3_male['Age'].fillna(value=df1_pc3_male['Age'].mean(), inplace=True)
df1_pc3_female = df1_pc3[df1_pc3['Sex'] == 'female'].copy()
df1_pc3_female['Age'].fillna(value=df1_pc3_female['Age'].mean(), inplace=True)
df1_pc3 = df1_pc3_male.append(df1_pc3_female)  # removed in pandas 2.0; see the pd.concat sketch below
df1_pc3.shape
code
33096184/cell_52
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
df1_pc1_male = df1_pc1[df1_pc1['Sex'] == 'male']
df1_pc1_female = df1_pc1[df1_pc1['Sex'] == 'female']
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
df1_pc1 = pd.concat([df1_pc1_male, df1_pc1_female])
df1_pc1.shape
df1_pc2 = df1[df1['Pclass'] == 2]
df1_pc2.shape
# .copy() avoids pandas' chained-assignment warning when filling a filtered slice.
df1_pc2_male = df1_pc2[df1_pc2['Sex'] == 'male'].copy()
df1_pc2_male['Age'] = df1_pc2_male['Age'].fillna(df1_pc2_male['Age'].mean())
df1_pc2_female = df1_pc2[df1_pc2['Sex'] == 'female'].copy()
df1_pc2_female['Age'] = df1_pc2_female['Age'].fillna(df1_pc2_female['Age'].mean())
df1_pc2 = pd.concat([df1_pc2_male, df1_pc2_female])
df1_pc2.shape
df1_pc3 = df1[df1['Pclass'] == 3]
df1_pc3.shape
df1_pc3_male = df1_pc3[df1_pc3['Sex'] == 'male'].copy()
df1_pc3_male['Age'] = df1_pc3_male['Age'].fillna(df1_pc3_male['Age'].mean())
df1_pc3_female = df1_pc3[df1_pc3['Sex'] == 'female'].copy()
df1_pc3_female['Age'] = df1_pc3_female['Age'].fillna(df1_pc3_female['Age'].mean())
df1_pc3 = pd.concat([df1_pc3_male, df1_pc3_female])
df1_pc3.shape
df1_pc1 = pd.concat([df1_pc1, df1_pc2])
df1 = pd.concat([df1_pc1, df1_pc3])
df1.shape
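# A terser equivalent, sketched for comparison (an addition, not part of the
# original run): one groupby-transform pass fills each missing Age with its
# (Pclass, Sex) group mean, avoiding the split/concat round-trip above.
# `df_alt` is a throwaway copy so the frames built above stay untouched.
df_alt = df.dropna(subset=['Embarked']).drop(columns='Cabin')
df_alt['Age'] = df_alt['Age'].fillna(df_alt.groupby(['Pclass', 'Sex'])['Age'].transform('mean'))
df_alt.shape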
code
33096184/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.tail()
code
33096184/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
sns.set(style='darkgrid')
sns.set(style='darkgrid')
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
df1_pc2 = df1[df1['Pclass'] == 2]
df1_pc2.shape
df1_pc3 = df1[df1['Pclass'] == 3]
df1_pc3.shape
plt.figure(figsize=(8, 5))
sns.boxplot(x='Sex', y='Age', data=df1_pc3, palette='winter')
code
33096184/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
code
33096184/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
code
33096184/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc2 = df1[df1['Pclass'] == 2]
df1_pc2.shape
code
33096184/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
code
33096184/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc2 = df1[df1['Pclass'] == 2]
df1_pc2.shape
# Impute missing ages per sex with the group mean; .copy() avoids pandas'
# chained-assignment warning when filling a filtered slice.
df1_pc2_male = df1_pc2[df1_pc2['Sex'] == 'male'].copy()
df1_pc2_male['Age'] = df1_pc2_male['Age'].fillna(df1_pc2_male['Age'].mean())
df1_pc2_female = df1_pc2[df1_pc2['Sex'] == 'female'].copy()
df1_pc2_female['Age'] = df1_pc2_female['Age'].fillna(df1_pc2_female['Age'].mean())
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
df1_pc2 = pd.concat([df1_pc2_male, df1_pc2_female])
df1_pc2.shape
code
33096184/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
df1_pc1.head()
code
33096184/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
sns.set(style='darkgrid')
sns.set(style='darkgrid')
sns.countplot(x='Survived', hue='Pclass', data=df1)
code
33096184/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
sns.set(style='darkgrid')
sns.set(style='darkgrid')
sns.countplot(x='Survived', hue='Sex', data=df1)
code
33096184/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.info()
code
33096184/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.isnull().sum() * 100 / df.shape[0]
df1 = df.copy()
df1.drop('Cabin', axis=1, inplace=True)
df1.Embarked.isnull().sum()
df1 = df1.dropna(axis=0, subset=['Embarked'])
df1.shape
df1_pc1 = df1[df1['Pclass'] == 1]
df1_pc1.shape
df1_pc1_male = df1_pc1[df1_pc1['Sex'] == 'male']
df1_pc1_female = df1_pc1[df1_pc1['Sex'] == 'female']
df1_pc1 = pd.concat([df1_pc1_male, df1_pc1_female])  # .append was removed in pandas 2.0
df1_pc1.shape
code
33096184/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.shape
df.describe()
code
1004561/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
os.listdir('../input')
code
1004561/cell_33
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_images = test.values.astype('float32')
# .ix was removed from pandas; .iloc does the same positional slicing.
train_images = train.iloc[:, 1:].values.astype('float32')
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
train_images = train_images / 255
test_images = test_images / 255
np.std(train_images)
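# Illustrative alternative (an addition, not from the original notebook):
# zero-mean / unit-variance standardization instead of plain [0, 1] scaling;
# `train_images_std` is a hypothetical name and is not used by later cells.
mean_px = train_images.mean()
std_px = train_images.std()
train_images_std = (train_images - mean_px) / std_px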
code
1004561/cell_55
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
history_dict = history.history
history_dict.keys()
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.clf()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo')
plt.plot(epochs, val_loss_values, 'r^')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
code
1004561/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_labels.shape
train_labels[0:10]
code
1004561/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
for i in range(9):
    plt.subplot(330 + (i + 1))
    plt.imshow(train_images_reshaped[i])
    plt.title(train_labels[i])
code
1004561/cell_48
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
history_dict = history.history
history_dict.keys()
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.clf()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc_values, 'bo')
plt.plot(epochs, val_acc_values, 'r^')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
code
1004561/cell_45
[ "text_plain_output_1.png" ]
# 'nb_epoch' is the pre-Keras-2 spelling; 'epochs' is the current keyword.
history = model.fit(train_images, train_labels, validation_split=0.05, epochs=25, batch_size=64)
code
1004561/cell_28
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_labels.shape
code
1004561/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.models import Sequential
from keras.layers import Dense, Dropout, Lambda, Flatten
code
1004561/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.shape)
train.head()
code
1004561/cell_38
[ "text_plain_output_1.png" ]
from keras.utils.np_utils import to_categorical
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_labels.shape
from keras.utils.np_utils import to_categorical
train_labels = to_categorical(train_labels)
train_labels.shape
train_labels[0:10]
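# Quick sanity check (an added illustration, assuming the Keras 2 signature of
# to_categorical): a digit label is one-hot encoded as a length-10 indicator
# vector, e.g. 3 -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].
assert to_categorical([3], num_classes=10)[0].tolist() == [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]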
code
1004561/cell_47
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
history_dict = history.history
history_dict.keys()
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo')
plt.plot(epochs, val_loss_values, 'r^')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
code
1004561/cell_17
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(test.shape)
test.head()
code
1004561/cell_46
[ "text_plain_output_1.png" ]
history_dict = history.history
history_dict.keys()
code
1004561/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_images_reshaped = train_images.reshape(train_images.shape[0], 28, 28)
train_images_reshaped.shape
code
1004561/cell_22
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_labels[0:10]
code
1004561/cell_53
[ "image_output_1.png" ]
# Imports added so the cell runs standalone (Sequential, Dense, Dropout and
# RMSprop were referenced without being imported in this cell as captured).
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=28 * 28))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=15, batch_size=64, verbose=0)  # 'nb_epoch' -> 'epochs'
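# Usual next step, sketched (assumes `test_images` was scaled the same way as
# the training data; 'submission.csv' is an illustrative file name, not from
# the original notebook):
import pandas as pd
preds = model.predict(test_images).argmax(axis=1)
submission = pd.DataFrame({'ImageId': range(1, len(preds) + 1), 'Label': preds})
submission.to_csv('submission.csv', index=False)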
code
1004561/cell_37
[ "text_plain_output_1.png" ]
from keras.utils.np_utils import to_categorical
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_images = train.iloc[:, 1:].values.astype('float32')  # .iloc replaces the removed .ix
train_labels = train.iloc[:, 0].values.astype('int32')
train_labels.shape
from keras.utils.np_utils import to_categorical
train_labels = to_categorical(train_labels)
train_labels.shape
code
90140081/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('E:\\Dockship\\Credict card\\TRAIN.csv')
df.head()
code
90125749/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
directories = ['../input/csc4851-homework4/birds_400/test', '../input/csc4851-homework4/birds_400/train', '../input//csc4851-homework4/birds_400/valid']
for dir in directories:
    label = []
    path = []
    for dirname, _, filenames in os.walk(dir):
        for filename in filenames:
            label.append(os.path.split(dirname)[1])
            path.append(os.path.join(dirname, filename))
    if dir == directories[0]:
        df_test = pd.DataFrame(columns=['path', 'label'])
        df_test['path'] = path
        df_test['label'] = label
    elif dir == directories[1]:
        df_train = pd.DataFrame(columns=['path', 'label'])
        df_train['path'] = path
        df_train['label'] = label
    elif dir == directories[2]:
        df_valid = pd.DataFrame(columns=['path', 'label'])
        df_valid['path'] = path
        df_valid['label'] = label
fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(15, 7), subplot_kw={'xticks': [], 'yticks': []})
df_sample = df_train.sample(15)
df_sample.reset_index(drop=True, inplace=True)
for i, ax in enumerate(axes.flat):
    ax.imshow(plt.imread(df_sample.path[i]))
    ax.set_title(df_sample.label[i])
plt.tight_layout()
plt.show()
code
90125749/cell_2
[ "text_plain_output_1.png" ]
!ls
code
90125749/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90125749/cell_5
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
directories = ['../input/csc4851-homework4/birds_400/test', '../input/csc4851-homework4/birds_400/train', '../input//csc4851-homework4/birds_400/valid']
for dir in directories:
    label = []
    path = []
    for dirname, _, filenames in os.walk(dir):
        for filename in filenames:
            label.append(os.path.split(dirname)[1])
            path.append(os.path.join(dirname, filename))
    if dir == directories[0]:
        df_test = pd.DataFrame(columns=['path', 'label'])
        df_test['path'] = path
        df_test['label'] = label
    elif dir == directories[1]:
        df_train = pd.DataFrame(columns=['path', 'label'])
        df_train['path'] = path
        df_train['label'] = label
    elif dir == directories[2]:
        df_valid = pd.DataFrame(columns=['path', 'label'])
        df_valid['path'] = path
        df_valid['label'] = label
df_train.head()
code
16144712/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import decomposition
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/train.csv')
label = data['label']
pixels = data.drop('label', axis=1)
from sklearn import decomposition
pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(pixels)
label = np.reshape(label.values, (label.shape[0], 1))
data_transformed = np.hstack((pca_data, label))
dframe = pd.DataFrame(data=data_transformed, columns=('pc1', 'pc2', 'label'))
# FacetGrid's 'size' argument was renamed 'height' in seaborn 0.9.
sns.FacetGrid(dframe, hue='label', height=5).map(plt.scatter, 'pc1', 'pc2').add_legend()
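# Added check (not in the original cell): how much variance the two retained
# components actually explain. For raw pixel data this is typically only a
# small fraction, which is why the classes overlap so heavily in the scatter.
print(pca.explained_variance_ratio_.sum())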
code
16144712/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn import decomposition
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/train.csv')
label = data['label']
pixels = data.drop('label', axis=1)
from sklearn import decomposition
pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(pixels)
label = np.reshape(label.values, (label.shape[0], 1))
data_transformed = np.hstack((pca_data, label))
dframe = pd.DataFrame(data=data_transformed, columns=('pc1', 'pc2', 'label'))
# FacetGrid's 'size' argument was renamed 'height' in seaborn 0.9.
sns.FacetGrid(dframe, hue='label', height=5).map(plt.scatter, 'pc1', 'pc2').add_legend()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0)
tsne_transform = model.fit_transform(pixels[:10000])
tsne_trans_data = np.hstack((tsne_transform, label[:10000]))
tsne_dframe = pd.DataFrame(data=tsne_trans_data, columns=('c1', 'c2', 'label'))
sns.FacetGrid(tsne_dframe, hue='label', height=5).map(plt.scatter, 'c1', 'c2').add_legend()
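# Note (added): t-SNE's cost grows steeply with sample count, hence the
# [:10000] slice above. A common speed-up, sketched here, is reducing to ~50
# dimensions with PCA before t-SNE; `pixels50` is an illustrative name only.
pca50 = decomposition.PCA(n_components=50)
pixels50 = pca50.fit_transform(pixels[:10000])
# TSNE(n_components=2, random_state=0).fit_transform(pixels50) would then
# replace the raw-pixel fit above.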
code
104120795/cell_42
[ "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt # visualization
import numpy as np # linear algebra
import pandas as pd # read and wrangle dataframes
import seaborn as sns # statistical visualizations and aesthetics
# Imports added so the cell runs standalone (train_test_split, boxcox and PCA
# were referenced without being imported in this cell as captured).
from scipy.stats import boxcox
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes
for feat in features:
    skew = df[feat].skew()
def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers
corr = df[features].corr()
outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)
for feat in features:
    skew = df[feat].skew()
X = df[features]
y = df['Type']
seed = 7
test_size = 0.2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)
features_boxcox = []
for feature in features:
    bc_transformed, _ = boxcox(df[feature] + 1)
    features_boxcox.append(bc_transformed)
features_boxcox = np.column_stack(features_boxcox)
df_bc = pd.DataFrame(data=features_boxcox, columns=features)
df_bc['Type'] = df['Type']
for feature in features:
    fig, ax = plt.subplots(1, 2, figsize=(7, 3.5))
    ax[0].hist(df[feature], color='blue', bins=30, alpha=0.3, label='Skew = %s' % str(round(df[feature].skew(), 3)))
    ax[0].set_title(str(feature))
    ax[0].legend(loc=0)
    ax[1].hist(df_bc[feature], color='red', bins=30, alpha=0.3, label='Skew = %s' % str(round(df_bc[feature].skew(), 3)))
    ax[1].set_title(str(feature) + ' after a Box-Cox transformation')
    ax[1].legend(loc=0)
    plt.show()
for feature in features:
    delta = np.abs(df_bc[feature].skew() / df[feature].skew())
pca = PCA(random_state=seed)
pca.fit(X_train)
var_exp = pca.explained_variance_ratio_
cum_var_exp = np.cumsum(var_exp)
plt.figure(figsize=(8, 6))
plt.bar(range(1, len(cum_var_exp) + 1), var_exp, align='center', label='individual variance explained', alpha=0.7)
plt.step(range(1, len(cum_var_exp) + 1), cum_var_exp, where='mid', label='cumulative variance explained', color='red')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.xticks(np.arange(1, len(var_exp) + 1, 1))
plt.legend(loc='center right')
plt.show()
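# Sketch of a follow-up (the 0.95 threshold is an illustrative choice, not
# from the original cell): the smallest number of components that retains at
# least 95% of the variance.
n_components_95 = int(np.argmax(cum_var_exp >= 0.95)) + 1
print('components for 95% of the variance:', n_components_95)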
code
104120795/cell_9
[ "image_output_1.png" ]
import pandas as pd # read and wrangle dataframes
df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes
df['Type'].value_counts()
code
104120795/cell_25
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # read and wrangle dataframes
df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes
def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers
outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)
print(df.shape)
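# Worked example of the 1.5 * IQR fence (illustrative data, added for clarity):
# each column below is [1..9, 100]; Q1 = 3.25 and Q3 = 7.75, so IQR = 4.5 and
# the upper fence is 7.75 + 1.5 * 4.5 = 14.5. The value 100 breaches it in all
# three columns, so only that row (index 9) is reported.
demo = pd.DataFrame({c: [1.0, 2, 3, 4, 5, 6, 7, 8, 9, 100] for c in ['a', 'b', 'c']})
outlier_hunt(demo)  # -> [9]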
code