Dataset columns:
path: string (length 13 to 17)
screenshot_names: sequence of strings (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value)
90109221/cell_8
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
code
90109221/cell_16
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape

# Restrict to digits 3 and 8 for the binary model
y_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
y_test_final = y_test_final.iloc[:, 0]
X_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
X_test_final = X_test_final.iloc[:, 1:785]
y_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
y_train_final = y_train_final.iloc[:, 0]
X_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
X_train_final = X_train_final.iloc[:, 1:785]
lda1 = LDA(n_components=1)
X_train_r21 = lda1.fit(X_train_final, y_train_final)
y_pred1 = lda1.predict(X_test_final)
print(accuracy_score(y_test_final, y_pred1))
print(y_pred1.shape)
code
90109221/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
mnist_train.head()
code
90109221/cell_17
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape

# Restrict to digits 3 and 8 for the binary model
y_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
y_test_final = y_test_final.iloc[:, 0]
X_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
X_test_final = X_test_final.iloc[:, 1:785]
y_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
y_train_final = y_train_final.iloc[:, 0]
X_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
X_train_final = X_train_final.iloc[:, 1:785]
lda1 = LDA(n_components=1)
X_train_r21 = lda1.fit(X_train_final, y_train_final)
y_pred1 = lda1.predict(X_test_final)
w = lda1.coef_
w.shape
code
90109221/cell_24
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape

# Restrict to digits 3 and 8 for the binary model
y_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
y_test_final = y_test_final.iloc[:, 0]
X_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
X_test_final = X_test_final.iloc[:, 1:785]
y_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
y_train_final = y_train_final.iloc[:, 0]
X_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
X_train_final = X_train_final.iloc[:, 1:785]
lda1 = LDA(n_components=1)
X_train_r21 = lda1.fit(X_train_final, y_train_final)
y_pred1 = lda1.predict(X_test_final)
w = lda1.coef_
w.shape
w0 = lda1.intercept_
np.transpose(w0).shape

# Shift each test image toward the LDA decision boundary and re-predict
X_test_final1 = X_test_final.to_numpy(dtype='uint8')
X_attack = X_test_final1 - (X_test_final1 @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda1.predict(X_attack)
code
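Note: cell_24 above perturbs each 3/8 test image toward the binary LDA decision boundary using w = lda1.coef_ and w0 = lda1.intercept_. A minimal, self-contained sketch of that projection step (the helper name is ours; the textbook minimal-L2 projection divides by ||w||**2, whereas the cell divides by ||w|| once):

import numpy as np

def project_to_boundary(X, w, w0):
    # Project each row of X onto the hyperplane w.x + w0 = 0 (smallest L2 perturbation).
    w = np.asarray(w).reshape(-1)
    scores = X @ w + w0                      # w.x + w0 for every sample
    return X - np.outer(scores / np.dot(w, w), w)

Points returned by this helper sit (numerically) on the decision boundary, so pushing slightly further along -w flips the predicted class.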
90109221/cell_14
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape

# Restrict to digits 3 and 8 for the binary model
y_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
y_test_final = y_test_final.iloc[:, 0]
X_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
X_test_final = X_test_final.iloc[:, 1:785]
y_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
y_train_final = y_train_final.iloc[:, 0]
X_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
X_train_final = X_train_final.iloc[:, 1:785]
print(y_train_final)
print(np.shape(y_train_final))
print(X_train_final)
print(np.shape(X_train_final))
code
90109221/cell_10
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
print(f'Classification report for classifier {lda}:\n{metrics.classification_report(y_test, y_pred)}\n')
code
90109221/cell_27
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_test = mnist_test.iloc[:, 1:785]
y_test = mnist_test.iloc[:, 0]
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, y_pred)

# Restrict to digits 3 and 8 for the binary model
y_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
y_test_final = y_test_final.iloc[:, 0]
X_test_final = mnist_test.loc[mnist_test['label'].isin([3, 8])]
X_test_final = X_test_final.iloc[:, 1:785]
y_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
y_train_final = y_train_final.iloc[:, 0]
X_train_final = mnist_train.loc[mnist_train['label'].isin([3, 8])]
X_train_final = X_train_final.iloc[:, 1:785]
lda1 = LDA(n_components=1)
X_train_r21 = lda1.fit(X_train_final, y_train_final)
y_pred1 = lda1.predict(X_test_final)
w = lda1.coef_
w.shape
w0 = lda1.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test_final, y_pred1)

# Shift each test image toward the LDA decision boundary and re-predict
X_test_final1 = X_test_final.to_numpy(dtype='uint8')
X_attack = X_test_final1 - (X_test_final1 @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda1.predict(X_attack)
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test_final, Y_attack)
disp.figure_.suptitle('Confusion Matrix')
print(f'Confusion matrix:\n{disp.confusion_matrix}')
plt.show()
code
90109221/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA  # LDA is used below but was not imported in the extracted cell

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_test = pd.read_csv('../input/mnist-in-csv/mnist_test.csv')
X_train = mnist_train.iloc[:, 1:785]
y_train = mnist_train.iloc[:, 0]
lda = LDA(n_components=9)
X_train_r2 = lda.fit(X_train, y_train)
X_train_r2
code
129010847/cell_9
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForCausalLM import torch import transformers import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('sai1881/bloom-560m-finetuned-Instruct-DB-v', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) prompt = 'Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: ' inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) prompt = 'Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: ' inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str.split('<By Manoj>')[0])
code
129010847/cell_4
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForCausalLM import torch import transformers import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('sai1881/bloom-560m-finetuned-Instruct-DB-v', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str.split('<By Manoj>')[0])
code
129010847/cell_8
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForCausalLM import torch import transformers import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('sai1881/bloom-560m-finetuned-Instruct-DB-v', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) prompt = 'Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: ' inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str.split('<By Manoj>')[0])
code
129010847/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM

MIN_TRANSFORMERS_VERSION = '4.25.1'
assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.'

tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
model = AutoModelForCausalLM.from_pretrained('sai1881/bloom-560m-finetuned-Instruct-DB-v', torch_dtype=torch.float16)
model = model.to('cuda:0')
code
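Note: the other cells of this notebook repeat the same tokenize, generate, decode sequence for every prompt. A small helper capturing that pattern could look like the following sketch (the function name is ours, not from the notebook; the generation settings are copied from the cells):

def generate_response(model, tokenizer, prompt, max_new_tokens=128):
    # Tokenize on the model's device, sample a continuation, and decode only the newly generated tokens.
    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    input_length = inputs.input_ids.shape[1]
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=True,
                             temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True)
    return tokenizer.decode(outputs.sequences[0, input_length:])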
129010847/cell_5
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForCausalLM import torch import transformers import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m') model = AutoModelForCausalLM.from_pretrained('sai1881/bloom-560m-finetuned-Instruct-DB-v', torch_dtype=torch.float16) model = model.to('cuda:0') prompt = "Instruction: When did Virgin Australia start operating?<END> \ncontext: Virgin Australia, the trading name of Virgin Australia Airlines Pty Ltd, is an Australian-based airline. It is the largest airline by fleet size to use the Virgin brand. It commenced services on 31 August 2000 as Virgin Blue, with two aircraft on a single route. It suddenly found itself as a major airline in Australia's domestic market after the collapse of Ansett Australia in September 2001. The airline has since grown to directly serve 32 cities in Australia, from hubs in Brisbane, Melbourne and Sydney.<END> \n<END>" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) prompt = 'Instruction: Why can camels survive for long without water? <END> \ncontext: <END> \n \nresponse: ' inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str.split('<By Manoj>')[0])
code
33106189/cell_9
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.svm import SVC import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB data_train = pd.read_csv('/kaggle/input/titanic/train.csv') data_test = pd.read_csv('/kaggle/input/titanic/test.csv') data_total = data_train.append(data_test, ignore_index=True) data_train_length = len(data_train.index) for c in ['Age', 'Fare']: g = sns.FacetGrid(data_train, col='Survived') g.map(plt.hist, c) corr = data_train.corr() age_mean_by_pclass = data_total[['Pclass', 'Age']].groupby('Pclass').mean() data_total['Age'] = data_total.apply(lambda row: age_mean_by_pclass.loc[row['Pclass'], 'Age'] if np.isnan(row['Age']) else row['Age'], axis=1) fare_median_by_pclass = data_total[['Pclass', 'Fare']].groupby('Pclass').median() data_total['Fare'] = data_total.apply(lambda row: fare_median_by_pclass.loc[row['Pclass'], 'Fare'] if np.isnan(row['Fare']) else row['Fare'], axis=1) data_total['Embarked'].fillna(data_total['Embarked'].mode()[0], inplace=True) data_total['isAlone'] = (data_total['SibSp'] + data_total['Parch'] == 0) * 1 data_total['bigGroup'] = (data_total['SibSp'] + data_total['Parch'] > 3) * 1 data_total['Title'] = data_total['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False) data_total['Title'] = data_total['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other') data_total['Title'] = data_total['Title'].replace(['Mlle', 'Ms', 'Mme'], 'Miss') age_bin_count = 10 data_total['Age_Bin'] = pd.cut(data_total['Age'], age_bin_count, labels=list(range(1, age_bin_count + 1))) fare_bin_count = 5 data_total['Fare_Bin'] = pd.qcut(data_total['Fare'], fare_bin_count, labels=list(range(1, fare_bin_count + 1))) enc = OneHotEncoder().fit(data_total[['Sex', 'Embarked', 'Title']]) enc_features_arr = enc.transform(data_total[['Sex', 'Embarked', 'Title']]).toarray() enc_features_labels = [label for cat in enc.categories_ for label in cat] enc_features_df = pd.DataFrame(enc_features_arr, columns=enc_features_labels).drop(columns=['male', 'S', 'Other']) data_total = data_total.join(enc_features_df) data_total = data_total.drop(columns=['PassengerId', 'Name', 'Cabin', 'Ticket', 'SibSp', 'Parch', 'Age', 'Fare', 'Sex', 'Embarked', 'Title']) data_train = data_total[:data_train_length] X = data_train.drop(columns=['Survived'], axis=1) y = data_train['Survived'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) scaler = StandardScaler().fit(X_train) X_train = 
scaler.transform(X_train) X_test = scaler.transform(X_test) """#first round of grid search svc_params = [{'C': [1,2,3,4,5], 'kernel': ['linear', 'rbf', 'sigmoid'], 'gamma': [.1, .2, .3, .4, .5, .6, .7, .8, .9]}, {'C': [1,2,3,4,5], 'kernel': ['linear']}] #output: {'C': 1, 'gamma': 0.2, 'kernel': 'rbf'}, 0.8264360018091361 knn_params = [{'n_neighbors': [3,4,5,6,7,8], 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'], 'leaf_size': [25, 30, 35, 40, 50], 'p': [1, 2]}] #output: {'algorithm': 'brute', 'leaf_size': 25, 'n_neighbors': 8, 'p': 2, 'weights': 'uniform'}, 0.8294663048394391 rfc_params = [{'n_estimators': [20, 50, 100, 150, 200], 'criterion': ['gini', 'entropy']}] #output: {'criterion': 'entropy', 'n_estimators': 100}, 0.7979873360470375 """ svc_params = [{'C': [0.2, 0.4, 0.5, 1, 2, 3, 4, 5], 'kernel': ['rbf'], 'gamma': [0.1, 0.15, 0.2, 0.25, 0.3]}] knn_params = [{'n_neighbors': [7, 8, 9, 10], 'weights': ['uniform'], 'algorithm': ['brute'], 'p': [2]}] rfc_params = [{'n_estimators': [80, 90, 100, 110, 120, 130, 140], 'criterion': ['entropy']}] classifier_names = ['SVC', 'KNN', 'RFC'] params = [svc_params, knn_params, rfc_params] classifier_objs = [SVC(), KNeighborsClassifier(), RandomForestClassifier()] best_params = {} for i in range(len(classifier_names)): classifier_objs[i].fit(X_train, y_train) grid_search = GridSearchCV(estimator=classifier_objs[i], param_grid=params[i], scoring='accuracy', cv=10).fit(X_train, y_train) best_params[classifier_names[i]] = grid_search.best_params_ classifier_tuned = [SVC(C=best_params['SVC']['C'], gamma=best_params['SVC']['gamma'], kernel=best_params['SVC']['kernel']), KNeighborsClassifier(algorithm=best_params['KNN']['algorithm'], n_neighbors=best_params['KNN']['n_neighbors'], p=best_params['KNN']['p'], weights=best_params['KNN']['weights']), LogisticRegression(), RandomForestClassifier(criterion=best_params['RFC']['criterion'], n_estimators=best_params['RFC']['n_estimators']), GaussianNB()] predictions_train = [] for clf in classifier_tuned: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) predictions_train.append(y_pred) print(clf) print(confusion_matrix(y_test, y_pred)) print('accuracy score: ' + str(accuracy_score(y_test, y_pred))) print('cross val score: ' + str(cross_val_score(clf, X_train, y_train).mean())) print('–––––––––') predictions_train = pd.DataFrame(predictions_train).transpose() predictions_train['y_pred_voted'] = predictions_train.mean(axis=1).round() predictions_train_voted = predictions_train['y_pred_voted'].values print('Result after voting:') print(confusion_matrix(y_test, predictions_train_voted)) print(accuracy_score(y_test, predictions_train_voted))
code
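Note: the averaging-and-rounding at the end of cell_9 above is a hard majority vote over the five classifiers. scikit-learn's VotingClassifier expresses the same idea directly; a sketch that reuses the grid-search results quoted in the cell's comments (treat the exact hyperparameters as assumptions):

from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB

voter = VotingClassifier(estimators=[
    ('svc', SVC(C=1, gamma=0.2, kernel='rbf')),
    ('knn', KNeighborsClassifier(n_neighbors=8, weights='uniform')),
    ('lr', LogisticRegression()),
    ('rfc', RandomForestClassifier(criterion='entropy', n_estimators=100)),
    ('gnb', GaussianNB()),
], voting='hard')
# voter.fit(X_train, y_train); voter.score(X_test, y_test)  # X_train, y_train as prepared in cell_9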
33106189/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB data_train = pd.read_csv('/kaggle/input/titanic/train.csv') data_test = pd.read_csv('/kaggle/input/titanic/test.csv') data_total = data_train.append(data_test, ignore_index=True) data_train_length = len(data_train.index) for c in ['Age', 'Fare']: g = sns.FacetGrid(data_train, col='Survived') g.map(plt.hist, c) corr = data_train.corr() data_total.info()
code
33106189/cell_6
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder, StandardScaler import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB data_train = pd.read_csv('/kaggle/input/titanic/train.csv') data_test = pd.read_csv('/kaggle/input/titanic/test.csv') data_total = data_train.append(data_test, ignore_index=True) data_train_length = len(data_train.index) for c in ['Age', 'Fare']: g = sns.FacetGrid(data_train, col='Survived') g.map(plt.hist, c) corr = data_train.corr() age_mean_by_pclass = data_total[['Pclass', 'Age']].groupby('Pclass').mean() data_total['Age'] = data_total.apply(lambda row: age_mean_by_pclass.loc[row['Pclass'], 'Age'] if np.isnan(row['Age']) else row['Age'], axis=1) fare_median_by_pclass = data_total[['Pclass', 'Fare']].groupby('Pclass').median() data_total['Fare'] = data_total.apply(lambda row: fare_median_by_pclass.loc[row['Pclass'], 'Fare'] if np.isnan(row['Fare']) else row['Fare'], axis=1) data_total['Embarked'].fillna(data_total['Embarked'].mode()[0], inplace=True) data_total['isAlone'] = (data_total['SibSp'] + data_total['Parch'] == 0) * 1 data_total['bigGroup'] = (data_total['SibSp'] + data_total['Parch'] > 3) * 1 data_total['Title'] = data_total['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False) data_total['Title'] = data_total['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other') data_total['Title'] = data_total['Title'].replace(['Mlle', 'Ms', 'Mme'], 'Miss') age_bin_count = 10 data_total['Age_Bin'] = pd.cut(data_total['Age'], age_bin_count, labels=list(range(1, age_bin_count + 1))) fare_bin_count = 5 data_total['Fare_Bin'] = pd.qcut(data_total['Fare'], fare_bin_count, labels=list(range(1, fare_bin_count + 1))) enc = OneHotEncoder().fit(data_total[['Sex', 'Embarked', 'Title']]) enc_features_arr = enc.transform(data_total[['Sex', 'Embarked', 'Title']]).toarray() enc_features_labels = [label for cat in enc.categories_ for label in cat] enc_features_df = pd.DataFrame(enc_features_arr, columns=enc_features_labels).drop(columns=['male', 'S', 'Other']) data_total = data_total.join(enc_features_df) data_total = data_total.drop(columns=['PassengerId', 'Name', 'Cabin', 'Ticket', 'SibSp', 'Parch', 'Age', 'Fare', 'Sex', 'Embarked', 'Title']) data_total.head()
code
33106189/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33106189/cell_3
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB data_train = pd.read_csv('/kaggle/input/titanic/train.csv') data_test = pd.read_csv('/kaggle/input/titanic/test.csv') data_total = data_train.append(data_test, ignore_index=True) data_train_length = len(data_train.index) for c in ['Pclass', 'Sex', 'Embarked', 'SibSp', 'Parch', 'Survived']: sns.catplot(x=c, kind='count', data=data_train) plt.show() for c in ['Age', 'Fare']: data_train[c].hist().set(xlabel=c) plt.show() for c in ['Pclass', 'Sex', 'Embarked', 'SibSp', 'Parch']: sns.catplot(x=c, y='Survived', kind='bar', data=data_train) plt.show() for c in ['Age', 'Fare']: g = sns.FacetGrid(data_train, col='Survived') g.map(plt.hist, c) plt.show() corr = data_train.corr() sns.boxplot(x='Pclass', y='Age', hue='Survived', data=data_train) plt.show() sns.boxplot(x='Pclass', y='Fare', hue='Survived', data=data_train[data_train['Fare'] < 200]) plt.show()
code
33106189/cell_5
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB data_train = pd.read_csv('/kaggle/input/titanic/train.csv') data_test = pd.read_csv('/kaggle/input/titanic/test.csv') data_total = data_train.append(data_test, ignore_index=True) data_train_length = len(data_train.index) for c in ['Age', 'Fare']: g = sns.FacetGrid(data_train, col='Survived') g.map(plt.hist, c) corr = data_train.corr() age_mean_by_pclass = data_total[['Pclass', 'Age']].groupby('Pclass').mean() data_total['Age'] = data_total.apply(lambda row: age_mean_by_pclass.loc[row['Pclass'], 'Age'] if np.isnan(row['Age']) else row['Age'], axis=1) fare_median_by_pclass = data_total[['Pclass', 'Fare']].groupby('Pclass').median() data_total['Fare'] = data_total.apply(lambda row: fare_median_by_pclass.loc[row['Pclass'], 'Fare'] if np.isnan(row['Fare']) else row['Fare'], axis=1) data_total['Embarked'].fillna(data_total['Embarked'].mode()[0], inplace=True) data_total.info()
code
90154899/cell_25
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.svm import SVC from sklearn.utils import shuffle from termcolor import colored import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from termcolor import colored from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') plt.style.use('ggplot') data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv') data.describe().T diabetic_map = {'No, borderline diabetes': 'No', 'Yes (during pregnancy)': 'Yes', 'Yes': 'Yes', 'No': 'No'} data.Diabetic = data.Diabetic.map(diabetic_map) categorical_features = ['Smoking', 'AlcoholDrinking', 'Stroke', 'DiffWalking', 'Sex', 'AgeCategory', 'Race', 'Diabetic', 'PhysicalActivity', 'GenHealth', 'Asthma', 'KidneyDisease', 'SkinCancer', 'HeartDisease'] numerical_columns = [x for x in data.columns if x not in categorical_features] def plotting_function(feature): fig, axes = plt.subplots(ncols = 3,figsize = (25,5)) heart_disease_by_feature = pd.DataFrame(data[data["HeartDisease"] == "Yes"].groupby(feature).count()["HeartDisease"]) heart_disease_by_feature.sort_values(by = "HeartDisease", ascending = False).plot(kind ="bar", ax = axes[0]) axes[1].pie(heart_disease_by_feature["HeartDisease"], labels = heart_disease_by_feature.index, autopct = "%.2f") data[feature].value_counts().sort_values(ascending = False).plot(kind = "bar", ax = axes[2], color = "green") axes[0].set_title(f"{feature} Among People Having Heart Disease (Bar)", size = 15) axes[1].set_title(f"{feature} Among People Having Heart Disease (Pie)", size = 15) axes[2].set_title(f"{feature} Sample Count") axes[0].set_xlabel("") axes[2].set_xlabel("") axes[2].set_ylabel("") plt.show() for categoric_col in categorical_features[:-1]: plotting_function(categoric_col) class Preprocessing: def __init__(self, data, categorical_features, numerical_columns, scaling=False, n_splits=None, n_inner=None, broad_encoding=None, smooth=None, smote=False): self.smooth = smooth self.numerical_columns = numerical_columns self.smote = smote self.data = data self.categorical_features = categorical_features self.scaling = scaling self.n_splits = n_splits self.n_inner = n_inner self.broad_encoding = broad_encoding def scaling_func(self): num_features = [feature for feature in self.data.columns if feature not in self.categorical_features] for num_feature in num_features: self.data[num_feature] = (self.data[num_feature] - self.data[num_feature].mean()) / self.data[num_feature].std() def label_encode(self, broad_encoding=None): label_encoding_features = [] for categorical_feature in self.categorical_features: if data[categorical_feature].nunique() == 2: label_encoding_features.append(categorical_feature) multivariable_features = [x for x in self.categorical_features if x not in label_encoding_features] label_encoder_map = {} 
for label_encoding_feature in label_encoding_features: le = LabelEncoder() le.fit(self.data[label_encoding_feature].values.reshape(-1, 1)) label_encode_map = dict(zip(le.classes_, le.transform(le.classes_))) label_encoder_map[label_encoding_feature] = label_encode_map encoded_data = le.transform(self.data[label_encoding_feature]) self.data[label_encoding_feature] = encoded_data if self.broad_encoding == 'OHE': self.data = pd.get_dummies(self.data, columns=multivariable_features, drop_first=True) elif self.broad_encoding == 'SMOOTH': for cat_col in multivariable_features: global_mean = self.data['HeartDisease'].mean() sample_size = self.data.groupby(cat_col).size() grouped_mean = self.data.groupby(cat_col)['HeartDisease'].mean() smooth_action = (sample_size * grouped_mean + global_mean * self.smooth) / (sample_size + self.smooth) self.data[cat_col] = self.data[cat_col].map(smooth_action) elif self.broad_encoding == 'CROSS_VAL': for multi_feat in multivariable_features: global_mean = self.data['HeartDisease'].mean() skf = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) for train_idx, test_idx in skf.split(self.data[multi_feat], self.data['HeartDisease']): train_data, test_data = (self.data.loc[train_idx], self.data.loc[test_idx]) train_data_mean = train_data.mean() grouped_mean_df = pd.DataFrame() skf_inner = StratifiedKFold(n_splits=self.n_inner, shuffle=True, random_state=42) for inner_train_idx, _ in skf_inner.split(train_data[multi_feat], train_data['HeartDisease']): inner_train_data = self.data.loc[inner_train_idx] grouped_mean = inner_train_data.groupby(multi_feat)['HeartDisease'].mean() grouped_mean_df = pd.concat([grouped_mean_df, grouped_mean], axis=1).fillna(train_data_mean) grouped_mean_map = grouped_mean_df.mean(axis=1) self.data.loc[test_idx, multi_feat] = self.data.loc[test_idx, multi_feat].map(grouped_mean_map) def smote_upsample(self): data = self.data.copy() oversampled_numerics_0 = pd.DataFrame() oversampled_categorics_0 = pd.DataFrame() oversampled_numerics_1 = pd.DataFrame() for _ in range(len(data)): random = np.random.random() if random < 0.5: choice_0_numerics = data[data['HeartDisease'] == 'No'][self.numerical_columns].sample(n=1).reset_index(drop=True) choice_0_categorics = data[data['HeartDisease'] == 'No'][self.categorical_features].sample(n=1).reset_index(drop=True) oversampled_numerics_0 = pd.concat([oversampled_numerics_1, choice_0_numerics]) oversampled_categorics_0 = pd.concat([oversampled_categorics_0, choice_0_categorics]) else: choice_1_numerics = data[data['HeartDisease'] == 'Yes'][self.numerical_columns].sample(n=1).reset_index(drop=True) choice_2_numerics = data[data['HeartDisease'] == 'Yes'][self.numerical_columns].sample(n=1).reset_index(drop=True) constant = np.random.random() synthetic_numerics = choice_1_numerics * constant + choice_2_numerics * (1 - constant) oversampled_numerics_1 = pd.concat([oversampled_numerics_1, synthetic_numerics]) oversampled_numerics_0.reset_index(drop=True, inplace=True) oversampled_numerics_1.reset_index(drop=True, inplace=True) oversampled_categorics_0.reset_index(drop=True, inplace=True) neighbors = KNeighborsClassifier(n_neighbors=3) neighbors.fit(data[self.numerical_columns], data[self.categorical_features[:-1]]) oversampled_categorics_1 = pd.DataFrame(neighbors.predict(oversampled_numerics_1), columns=self.categorical_features[:-1]) oversampled_categorics_1['HeartDisease'] = 'Yes' oversampled_categorics = pd.concat([oversampled_categorics_0, oversampled_categorics_1]) oversampled_numerics = 
pd.concat([oversampled_numerics_0, oversampled_numerics_1]) oversampled_data = oversampled_numerics.join(oversampled_categorics) oversampled_data = shuffle(oversampled_data) oversampled_data.reset_index(drop=True, inplace=True) del data return oversampled_data def transform(self): if self.smote: self.data = self.smote_upsample() else: pass if self.scaling: self.scaling_func() else: pass self.label_encode() return self.data.copy() zeros = data[data['HeartDisease'] == 'No'].sample(n=int(len(data) / 50), replace=False) ones = data[data['HeartDisease'] == 'Yes'].sample(n=int(len(data) / 500), replace=False) small_data = shuffle(pd.concat([zeros, ones])) small_data.reset_index(drop=True, inplace=True) def scores(y_true, y_pred): y_true = np.array(y_true) y_pred = np.array(y_pred) tp = 0 fn = 0 for i in range(len(y_true)): if y_true[i] == y_pred[i]: tp += 1 else: fn += 1 acc_score = tp / (tp + fn) idx_0 = np.where(y_true == 0)[0] tp_0 = 0 fn_0 = 0 for i in idx_0: if y_true[i] == y_pred[i]: tp_0 += 1 else: fn_0 += 1 idx_1 = np.where(y_true == 1)[0] tp_1 = 0 fn_1 = 0 for i in idx_1: if y_true[i] == y_pred[i]: tp_1 += 1 else: fn_1 += 1 prec_score_0 = tp_0 / (tp_0 + fn_0) prec_score_1 = tp_1 / (tp_1 + fn_1) tp_0 = 0 tp_1 = 0 no_0 = len(np.where(y_true == 0)[0]) no_1 = len(np.where(y_true == 1)[0]) for i in np.where(y_true == 0)[0]: if y_true[i] == y_pred[i]: tp_0 += 1 else: continue for i in np.where(y_true == 1)[0]: if y_true[i] == y_pred[i]: tp_1 += 1 else: continue recall_0 = tp_0 / no_0 recall_1 = tp_1 / no_1 f1_0 = 2 * (prec_score_0 * recall_0) / (prec_score_0 + recall_0) f1_1 = 2 * (prec_score_1 * recall_1) / (prec_score_1 + recall_1) def perform_func(preprocessed_data, model, scoring=scores): model_use = model() train_cols = [x for x in preprocessed_data.columns if x != 'HeartDisease'] for i in range(5): test_indicies = np.random.randint(low=0, high=len(preprocessed_data), size=int(len(preprocessed_data) / 5)) train_indicies = np.array([x for x in range(0, len(preprocessed_data)) if x not in test_indicies]) model_use.fit(preprocessed_data.loc[train_indicies, train_cols], preprocessed_data.loc[train_indicies, 'HeartDisease']) preds = model_use.predict(preprocessed_data.iloc[test_indicies, 1:]) scoring(preprocessed_data.loc[test_indicies, 'HeartDisease'], preds) preprocessor = Preprocessing(small_data, categorical_features, numerical_columns, scaling=True, broad_encoding='OHE') encoded_data = preprocessor.transform() model_list = [('SVC', SVC), ('LogisticRegression', LogisticRegression), ('RandomForestClassifier', RandomForestClassifier)] for name, model in model_list: print(f'{name} Model Results') perform_func(encoded_data, model)
code
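Note: the 'SMOOTH' branch of Preprocessing.label_encode in cell_25 above is smoothed target (mean) encoding. In isolation the mapping it builds is equivalent to the following sketch (df, cat_col, target_col and m are placeholder names):

import pandas as pd

def smoothed_target_encode(df, cat_col, target_col, m):
    # encoding(cat) = (n_cat * mean_cat + m * global_mean) / (n_cat + m)
    global_mean = df[target_col].mean()
    n_cat = df.groupby(cat_col).size()
    mean_cat = df.groupby(cat_col)[target_col].mean()
    return df[cat_col].map((n_cat * mean_cat + m * global_mean) / (n_cat + m))

Larger m pulls rare categories toward the global mean, which is the point of the smoothing term.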
90154899/cell_20
[ "image_output_1.png" ]
from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.utils import shuffle import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from termcolor import colored from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') plt.style.use('ggplot') data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv') data.describe().T diabetic_map = {'No, borderline diabetes': 'No', 'Yes (during pregnancy)': 'Yes', 'Yes': 'Yes', 'No': 'No'} data.Diabetic = data.Diabetic.map(diabetic_map) categorical_features = ['Smoking', 'AlcoholDrinking', 'Stroke', 'DiffWalking', 'Sex', 'AgeCategory', 'Race', 'Diabetic', 'PhysicalActivity', 'GenHealth', 'Asthma', 'KidneyDisease', 'SkinCancer', 'HeartDisease'] numerical_columns = [x for x in data.columns if x not in categorical_features] def plotting_function(feature): fig, axes = plt.subplots(ncols = 3,figsize = (25,5)) heart_disease_by_feature = pd.DataFrame(data[data["HeartDisease"] == "Yes"].groupby(feature).count()["HeartDisease"]) heart_disease_by_feature.sort_values(by = "HeartDisease", ascending = False).plot(kind ="bar", ax = axes[0]) axes[1].pie(heart_disease_by_feature["HeartDisease"], labels = heart_disease_by_feature.index, autopct = "%.2f") data[feature].value_counts().sort_values(ascending = False).plot(kind = "bar", ax = axes[2], color = "green") axes[0].set_title(f"{feature} Among People Having Heart Disease (Bar)", size = 15) axes[1].set_title(f"{feature} Among People Having Heart Disease (Pie)", size = 15) axes[2].set_title(f"{feature} Sample Count") axes[0].set_xlabel("") axes[2].set_xlabel("") axes[2].set_ylabel("") plt.show() for categoric_col in categorical_features[:-1]: plotting_function(categoric_col) class Preprocessing: def __init__(self, data, categorical_features, numerical_columns, scaling=False, n_splits=None, n_inner=None, broad_encoding=None, smooth=None, smote=False): self.smooth = smooth self.numerical_columns = numerical_columns self.smote = smote self.data = data self.categorical_features = categorical_features self.scaling = scaling self.n_splits = n_splits self.n_inner = n_inner self.broad_encoding = broad_encoding def scaling_func(self): num_features = [feature for feature in self.data.columns if feature not in self.categorical_features] for num_feature in num_features: self.data[num_feature] = (self.data[num_feature] - self.data[num_feature].mean()) / self.data[num_feature].std() def label_encode(self, broad_encoding=None): label_encoding_features = [] for categorical_feature in self.categorical_features: if data[categorical_feature].nunique() == 2: label_encoding_features.append(categorical_feature) multivariable_features = [x for x in self.categorical_features if x not in label_encoding_features] label_encoder_map = {} for label_encoding_feature in label_encoding_features: le = LabelEncoder() le.fit(self.data[label_encoding_feature].values.reshape(-1, 1)) label_encode_map = 
dict(zip(le.classes_, le.transform(le.classes_))) label_encoder_map[label_encoding_feature] = label_encode_map encoded_data = le.transform(self.data[label_encoding_feature]) self.data[label_encoding_feature] = encoded_data if self.broad_encoding == 'OHE': self.data = pd.get_dummies(self.data, columns=multivariable_features, drop_first=True) elif self.broad_encoding == 'SMOOTH': for cat_col in multivariable_features: global_mean = self.data['HeartDisease'].mean() sample_size = self.data.groupby(cat_col).size() grouped_mean = self.data.groupby(cat_col)['HeartDisease'].mean() smooth_action = (sample_size * grouped_mean + global_mean * self.smooth) / (sample_size + self.smooth) self.data[cat_col] = self.data[cat_col].map(smooth_action) elif self.broad_encoding == 'CROSS_VAL': for multi_feat in multivariable_features: global_mean = self.data['HeartDisease'].mean() skf = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) for train_idx, test_idx in skf.split(self.data[multi_feat], self.data['HeartDisease']): train_data, test_data = (self.data.loc[train_idx], self.data.loc[test_idx]) train_data_mean = train_data.mean() grouped_mean_df = pd.DataFrame() skf_inner = StratifiedKFold(n_splits=self.n_inner, shuffle=True, random_state=42) for inner_train_idx, _ in skf_inner.split(train_data[multi_feat], train_data['HeartDisease']): inner_train_data = self.data.loc[inner_train_idx] grouped_mean = inner_train_data.groupby(multi_feat)['HeartDisease'].mean() grouped_mean_df = pd.concat([grouped_mean_df, grouped_mean], axis=1).fillna(train_data_mean) grouped_mean_map = grouped_mean_df.mean(axis=1) self.data.loc[test_idx, multi_feat] = self.data.loc[test_idx, multi_feat].map(grouped_mean_map) def smote_upsample(self): data = self.data.copy() oversampled_numerics_0 = pd.DataFrame() oversampled_categorics_0 = pd.DataFrame() oversampled_numerics_1 = pd.DataFrame() for _ in range(len(data)): random = np.random.random() if random < 0.5: choice_0_numerics = data[data['HeartDisease'] == 'No'][self.numerical_columns].sample(n=1).reset_index(drop=True) choice_0_categorics = data[data['HeartDisease'] == 'No'][self.categorical_features].sample(n=1).reset_index(drop=True) oversampled_numerics_0 = pd.concat([oversampled_numerics_1, choice_0_numerics]) oversampled_categorics_0 = pd.concat([oversampled_categorics_0, choice_0_categorics]) else: choice_1_numerics = data[data['HeartDisease'] == 'Yes'][self.numerical_columns].sample(n=1).reset_index(drop=True) choice_2_numerics = data[data['HeartDisease'] == 'Yes'][self.numerical_columns].sample(n=1).reset_index(drop=True) constant = np.random.random() synthetic_numerics = choice_1_numerics * constant + choice_2_numerics * (1 - constant) oversampled_numerics_1 = pd.concat([oversampled_numerics_1, synthetic_numerics]) oversampled_numerics_0.reset_index(drop=True, inplace=True) oversampled_numerics_1.reset_index(drop=True, inplace=True) oversampled_categorics_0.reset_index(drop=True, inplace=True) neighbors = KNeighborsClassifier(n_neighbors=3) neighbors.fit(data[self.numerical_columns], data[self.categorical_features[:-1]]) oversampled_categorics_1 = pd.DataFrame(neighbors.predict(oversampled_numerics_1), columns=self.categorical_features[:-1]) oversampled_categorics_1['HeartDisease'] = 'Yes' oversampled_categorics = pd.concat([oversampled_categorics_0, oversampled_categorics_1]) oversampled_numerics = pd.concat([oversampled_numerics_0, oversampled_numerics_1]) oversampled_data = oversampled_numerics.join(oversampled_categorics) oversampled_data = 
shuffle(oversampled_data) oversampled_data.reset_index(drop=True, inplace=True) del data return oversampled_data def transform(self): if self.smote: self.data = self.smote_upsample() else: pass if self.scaling: self.scaling_func() else: pass self.label_encode() return self.data.copy() print('Sample Ratio:', int(len(data[data.HeartDisease == 'No']) / len(data[data.HeartDisease == 'Yes']))) zeros = data[data['HeartDisease'] == 'No'].sample(n=int(len(data) / 50), replace=False) ones = data[data['HeartDisease'] == 'Yes'].sample(n=int(len(data) / 500), replace=False) small_data = shuffle(pd.concat([zeros, ones])) small_data.reset_index(drop=True, inplace=True) print(small_data.shape) small_data.head()
code
90154899/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv')
data.info()
code
90154899/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from termcolor import colored from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') plt.style.use('ggplot') data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv') data.describe().T diabetic_map = {'No, borderline diabetes': 'No', 'Yes (during pregnancy)': 'Yes', 'Yes': 'Yes', 'No': 'No'} data.Diabetic = data.Diabetic.map(diabetic_map) plt.figure(figsize=(10, 5)) sns.countplot('Sex', hue='HeartDisease', data=data) plt.title('Heart Disease for Different Genders', size=15) plt.show()
code
90154899/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv')
data.describe().T
code
90154899/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from termcolor import colored from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') plt.style.use('ggplot') data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv') data.describe().T diabetic_map = {'No, borderline diabetes': 'No', 'Yes (during pregnancy)': 'Yes', 'Yes': 'Yes', 'No': 'No'} data.Diabetic = data.Diabetic.map(diabetic_map) categorical_features = ['Smoking', 'AlcoholDrinking', 'Stroke', 'DiffWalking', 'Sex', 'AgeCategory', 'Race', 'Diabetic', 'PhysicalActivity', 'GenHealth', 'Asthma', 'KidneyDisease', 'SkinCancer', 'HeartDisease'] numerical_columns = [x for x in data.columns if x not in categorical_features] def plotting_function(feature): fig, axes = plt.subplots(ncols=3, figsize=(25, 5)) heart_disease_by_feature = pd.DataFrame(data[data['HeartDisease'] == 'Yes'].groupby(feature).count()['HeartDisease']) heart_disease_by_feature.sort_values(by='HeartDisease', ascending=False).plot(kind='bar', ax=axes[0]) axes[1].pie(heart_disease_by_feature['HeartDisease'], labels=heart_disease_by_feature.index, autopct='%.2f') data[feature].value_counts().sort_values(ascending=False).plot(kind='bar', ax=axes[2], color='green') axes[0].set_title(f'{feature} Among People Having Heart Disease (Bar)', size=15) axes[1].set_title(f'{feature} Among People Having Heart Disease (Pie)', size=15) axes[2].set_title(f'{feature} Sample Count') axes[0].set_xlabel('') axes[2].set_xlabel('') axes[2].set_ylabel('') plt.show() for categoric_col in categorical_features[:-1]: plotting_function(categoric_col)
code
90154899/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler from sklearn.model_selection import StratifiedKFold from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from termcolor import colored from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') plt.style.use('ggplot') data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv') data.describe().T diabetic_map = {'No, borderline diabetes': 'No', 'Yes (during pregnancy)': 'Yes', 'Yes': 'Yes', 'No': 'No'} data.Diabetic = data.Diabetic.map(diabetic_map) plt.figure(figsize=(15, 5)) sns.countplot('AgeCategory', hue='HeartDisease', data=data) plt.title('Heart Disease for Different Ranges of Ages', size=15) plt.show()
code
90154899/cell_5
[ "image_output_11.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd

data = pd.read_csv('../input/personal-key-indicators-of-heart-disease/heart_2020_cleaned.csv')
data.head()
code
104124170/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) Type_amount = data.groupby('Type_ID')['Revenue'].sum().reset_index() Type_amount.rename(columns={'Revenue': 'Amount'}, inplace=True) Type_amount
code
104124170/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.figure(figsize=(12, 5)) sns.barplot(x='InvoiceNo', y='Revenue', data=Rev_Country) plt.xticks(rotation=40, ha='right') plt.title('Total Revenue of InvoiceNo') plt.xlabel('InvoiceNo') plt.ylabel('Total Revenue')
code
104124170/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0)
data = data.dropna()
data['Type'].nunique()
code
104124170/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) Type_amount = data.groupby('Type_ID')['Revenue'].sum().reset_index() Type_amount.rename(columns={'Revenue': 'Amount'}, inplace=True) Type_amount Type_Frequency = data.groupby('Type_ID')['Type'].count().reset_index() Type_Frequency.columns = ['Type_ID', 'Frequency'] Type_Frequency data
code
104124170/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0)
data.info()
code
104124170/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) data.head()
code
104124170/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] plt.figure(figsize=(12, 5)) sns.barplot(x='CustomerID', y='Revenue', data=Rev_Coustomer) plt.title('Top15 spend Most Client ') plt.xlabel('Client') plt.ylabel('Total Revenue')
code
104124170/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) Type_amount = data.groupby('Type_ID')['Revenue'].sum().reset_index() Type_amount.rename(columns={'Revenue': 'Amount'}, inplace=True) Type_amount Type_Frequency = data.groupby('Type_ID')['Type'].count().reset_index() Type_Frequency.columns = ['Type_ID', 'Frequency'] Type_Frequency df = pd.merge(Type_amount, Type_Frequency, how='inner', on='Type_ID') max_date = max(data['InvoiceDate']) data['Diff'] = max_date - data['InvoiceDate'] Type_Recency = data.groupby('Type_ID')['Diff'].min().reset_index() Type_Recency['Diff'] = Type_Recency['Diff'].dt.days + 1 Type_Recency.rename(columns={'Diff': 'Recency'}, inplace=True) df = df.merge(Type_Recency, how='inner', on='Type_ID') df
code
104124170/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type Type_SingleClient = Most_customer_Type.sort_values(by='Quantity', ascending=False).reset_index()[:10] plt.figure(figsize=(14, 8)) sns.barplot(x='Type', y='Quantity', hue='CustomerID', data=Type_SingleClient) plt.title('Total Revenue of InvoiceNo') plt.xlabel('InvoiceNo') plt.ylabel('Total Revenue')
code
104124170/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0)
data.head()
code
104124170/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df
code
104124170/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) Type_amount = data.groupby('Type_ID')['Revenue'].sum().reset_index() Type_amount.rename(columns={'Revenue': 'Amount'}, inplace=True) Type_amount Type_Frequency = data.groupby('Type_ID')['Type'].count().reset_index() Type_Frequency.columns = ['Type_ID', 'Frequency'] Type_Frequency c = data[['Type', 'Type_ID']].drop_duplicates() c
code
104124170/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type
code
104124170/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() a = data['Description'].str.split() data['Type'] = a.apply(lambda x: ' '.join(x[-2:])) Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') Rev_Country = data.groupby('InvoiceNo')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.xticks(rotation=40, ha='right') grouped = data.groupby(['Type', 'CustomerID'])['Quantity'].sum().reset_index() grouped = grouped.sort_values(by=['Type', 'Quantity'], ascending=[True, False]) grouped['Rank'] = grouped.groupby(['Type']).cumcount() + 1 Most_customer_Type = grouped[['Type', 'CustomerID', 'Quantity']][grouped['Rank'] == 1] Most_customer_Type a = data['Type'].unique() df = pd.DataFrame(data=a.flatten()).reset_index() df['Type'] = df[0] df = df[['index', 'Type']] df data = data.merge(df, how='inner', on='Type') data.rename(columns={'index': 'Type_ID'}, inplace=True) Type_amount = data.groupby('Type_ID')['Revenue'].sum().reset_index() Type_amount.rename(columns={'Revenue': 'Amount'}, inplace=True) Type_amount Type_Frequency = data.groupby('Type_ID')['Type'].count().reset_index() Type_Frequency.columns = ['Type_ID', 'Frequency'] Type_Frequency
code
104124170/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/onlineretail/OnlineRetail.csv', encoding='cp1252', header=0) data = data.dropna() Rev_Coustomer = data.groupby('CustomerID')['Revenue'].sum().sort_values(ascending=False).reset_index()[:15] Rev_Country = data.groupby('Type')['Revenue'].sum().sort_values(ascending=False).reset_index()[:20] plt.figure(figsize=(12, 5)) sns.barplot(x='Type', y='Revenue', data=Rev_Country) plt.xticks(rotation=40, ha='right') plt.title('Total Revenue of Type') plt.xlabel('Type') plt.ylabel('Total Revenue')
code
34120972/cell_21
[ "text_plain_output_1.png" ]
import albumentations
import ast
import cv2
import matplotlib.patches as patches
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_id = 'c14c1e300'
image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
pascal_voc_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
coco_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'width', 'height']].astype(np.int32).values
assert len(pascal_voc_boxes) == len(coco_boxes)
labels = np.ones((len(pascal_voc_boxes),))

def get_bbox(bboxes, col, color='white', bbox_format='pascal_voc'):
    for i in range(len(bboxes)):
        if bbox_format == 'pascal_voc':
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        else:
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2], bboxes[i][3], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=pascal_voc_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(pascal_voc_boxes, ax[0], color='red')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()
aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1), albumentations.Blur(p=1)], bbox_params={'format': 'coco', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=coco_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(coco_boxes, ax[0], color='red', bbox_format='coco')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red', bbox_format='coco')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()
code
34120972/cell_4
[ "image_output_1.png" ]
import ast
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
train_df.head()
code
34120972/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import ast
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_ids = train_df['image_id'].unique()
print(f'Total number of training images: {len(image_ids)}')
code
34120972/cell_29
[ "text_plain_output_1.png" ]
from albumentations.augmentations.bbox_utils import denormalize_bbox, normalize_bbox
from albumentations.core.transforms_interface import DualTransform
from collections import namedtuple
import albumentations
import ast
import cv2
import matplotlib.patches as patches
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_ids = train_df['image_id'].unique()
image_id = 'c14c1e300'
image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
pascal_voc_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
coco_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'width', 'height']].astype(np.int32).values
assert len(pascal_voc_boxes) == len(coco_boxes)
labels = np.ones((len(pascal_voc_boxes),))

def get_bbox(bboxes, col, color='white', bbox_format='pascal_voc'):
    for i in range(len(bboxes)):
        if bbox_format == 'pascal_voc':
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        else:
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2], bboxes[i][3], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=pascal_voc_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(pascal_voc_boxes, ax[0], color='red')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()
aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1), albumentations.Blur(p=1)], bbox_params={'format': 'coco', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=coco_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(coco_boxes, ax[0], color='red', bbox_format='coco')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red', bbox_format='coco')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()

class CustomCutout(DualTransform):
    """
    Custom Cutout augmentation with handling of bounding boxes
    Note: (only supports square cutout regions)
    Author: Kaushal28
    """

    def __init__(self, fill_value=0, bbox_removal_threshold=0.5, min_cutout_size=192, max_cutout_size=512, always_apply=False, p=0.5):
        """
        Class constructor
        :param fill_value: Value to be filled in cutout (default is 0 or black color)
        :param bbox_removal_threshold: Bboxes having content cut by cutout patch more than this threshold will be removed
        :param min_cutout_size: minimum size of cutout (192 x 192)
        :param max_cutout_size: maximum size of cutout (512 x 512)
        """
        super(CustomCutout, self).__init__(always_apply, p)
        self.fill_value = fill_value
        self.bbox_removal_threshold = bbox_removal_threshold
        self.min_cutout_size = min_cutout_size
        self.max_cutout_size = max_cutout_size

    def _get_cutout_position(self, img_height, img_width, cutout_size):
        """
        Randomly generates cutout position as a named tuple
        :param img_height: height of the original image
        :param img_width: width of the original image
        :param cutout_size: size of the cutout patch (square)
        :returns position of cutout patch as a named tuple
        """
        position = namedtuple('Point', 'x y')
        return position(np.random.randint(0, img_width - cutout_size + 1), np.random.randint(0, img_height - cutout_size + 1))

    def _get_cutout(self, img_height, img_width):
        """
        Creates a cutout patch with given fill value and determines the position in the original image
        :param img_height: height of the original image
        :param img_width: width of the original image
        :returns (cutout patch, cutout size, cutout position)
        """
        cutout_size = np.random.randint(self.min_cutout_size, self.max_cutout_size + 1)
        cutout_position = self._get_cutout_position(img_height, img_width, cutout_size)
        return (np.full((cutout_size, cutout_size, 3), self.fill_value), cutout_size, cutout_position)

    def apply(self, image, **params):
        """
        Applies the cutout augmentation on the given image
        :param image: The image to be augmented
        :returns augmented image
        """
        image = image.copy()
        self.img_height, self.img_width, _ = image.shape
        cutout_arr, cutout_size, cutout_pos = self._get_cutout(self.img_height, self.img_width)
        self.image = image
        self.cutout_pos = cutout_pos
        self.cutout_size = cutout_size
        image[cutout_pos.y:cutout_pos.y + cutout_size, cutout_pos.x:cutout_size + cutout_pos.x, :] = cutout_arr
        return image

    def apply_to_bbox(self, bbox, **params):
        """
        Removes the bounding boxes which are covered by the applied cutout
        :param bbox: A single bounding box coordinates in pascal_voc format
        :returns transformed bbox's coordinates
        """
        bbox = denormalize_bbox(bbox, self.img_height, self.img_width)
        x_min, y_min, x_max, y_max = tuple(map(int, bbox))
        bbox_size = (x_max - x_min) * (y_max - y_min)
        overlapping_size = np.sum((self.image[y_min:y_max, x_min:x_max, 0] == self.fill_value) & (self.image[y_min:y_max, x_min:x_max, 1] == self.fill_value) & (self.image[y_min:y_max, x_min:x_max, 2] == self.fill_value))
        if overlapping_size / bbox_size > self.bbox_removal_threshold:
            return normalize_bbox((0, 0, 0, 0), self.img_height, self.img_width)
        return normalize_bbox(bbox, self.img_height, self.img_width)

    def get_transform_init_args_names(self):
        """
        Fetches the parameter(s) of __init__ method
        :returns: tuple of parameter(s) of __init__ method
        """
        return 'fill_value'

augmentation = albumentations.Compose([CustomCutout(p=1), albumentations.Flip(always_apply=True), albumentations.OneOf([albumentations.Blur(p=0.5), albumentations.GaussNoise(var_limit=5.0 / 255.0, p=0.5)], p=1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})

def get_bbox(bboxes, col, color='white'):
    for i in range(len(bboxes)):
        rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

num_images = 5
rand_start = np.random.randint(0, len(image_ids) - 5)
fig, ax = plt.subplots(nrows=num_images, ncols=2, figsize=(16, 40))
for index, image_id in enumerate(image_ids[rand_start:rand_start + num_images]):
    image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
    image /= 255.0
    bboxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
    labels = np.ones((len(bboxes),))
    aug_result = augmentation(image=image, bboxes=bboxes, labels=labels)
    get_bbox(bboxes, ax[index][0], color='red')
    ax[index][0].grid(False)
    ax[index][0].set_xticks([])
    ax[index][0].set_yticks([])
    ax[index][0].title.set_text('Original Image')
    ax[index][0].imshow(image)
    get_bbox(aug_result['bboxes'], ax[index][1], color='red')
    ax[index][1].grid(False)
    ax[index][1].set_xticks([])
    ax[index][1].set_yticks([])
    ax[index][1].title.set_text(f"Augmented Image: Removed bboxes: {len(bboxes) - len(aug_result['bboxes'])}")
    ax[index][1].imshow(aug_result['image'])
plt.show()
code
34120972/cell_32
[ "image_output_1.png" ]
from albumentations.augmentations.bbox_utils import denormalize_bbox, normalize_bbox
from albumentations.core.transforms_interface import DualTransform
from collections import namedtuple
from tqdm import tqdm
import albumentations
import ast
import cv2
import matplotlib.patches as patches
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import zipfile
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_ids = train_df['image_id'].unique()
image_id = 'c14c1e300'
image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
pascal_voc_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
coco_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'width', 'height']].astype(np.int32).values
assert len(pascal_voc_boxes) == len(coco_boxes)
labels = np.ones((len(pascal_voc_boxes),))

def get_bbox(bboxes, col, color='white', bbox_format='pascal_voc'):
    for i in range(len(bboxes)):
        if bbox_format == 'pascal_voc':
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        else:
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2], bboxes[i][3], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=pascal_voc_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(pascal_voc_boxes, ax[0], color='red')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()
aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1), albumentations.Blur(p=1)], bbox_params={'format': 'coco', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=coco_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(coco_boxes, ax[0], color='red', bbox_format='coco')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red', bbox_format='coco')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()

class CustomCutout(DualTransform):
    """
    Custom Cutout augmentation with handling of bounding boxes
    Note: (only supports square cutout regions)
    Author: Kaushal28
    """

    def __init__(self, fill_value=0, bbox_removal_threshold=0.5, min_cutout_size=192, max_cutout_size=512, always_apply=False, p=0.5):
        """
        Class constructor
        :param fill_value: Value to be filled in cutout (default is 0 or black color)
        :param bbox_removal_threshold: Bboxes having content cut by cutout patch more than this threshold will be removed
        :param min_cutout_size: minimum size of cutout (192 x 192)
        :param max_cutout_size: maximum size of cutout (512 x 512)
        """
        super(CustomCutout, self).__init__(always_apply, p)
        self.fill_value = fill_value
        self.bbox_removal_threshold = bbox_removal_threshold
        self.min_cutout_size = min_cutout_size
        self.max_cutout_size = max_cutout_size

    def _get_cutout_position(self, img_height, img_width, cutout_size):
        """
        Randomly generates cutout position as a named tuple
        :param img_height: height of the original image
        :param img_width: width of the original image
        :param cutout_size: size of the cutout patch (square)
        :returns position of cutout patch as a named tuple
        """
        position = namedtuple('Point', 'x y')
        return position(np.random.randint(0, img_width - cutout_size + 1), np.random.randint(0, img_height - cutout_size + 1))

    def _get_cutout(self, img_height, img_width):
        """
        Creates a cutout patch with given fill value and determines the position in the original image
        :param img_height: height of the original image
        :param img_width: width of the original image
        :returns (cutout patch, cutout size, cutout position)
        """
        cutout_size = np.random.randint(self.min_cutout_size, self.max_cutout_size + 1)
        cutout_position = self._get_cutout_position(img_height, img_width, cutout_size)
        return (np.full((cutout_size, cutout_size, 3), self.fill_value), cutout_size, cutout_position)

    def apply(self, image, **params):
        """
        Applies the cutout augmentation on the given image
        :param image: The image to be augmented
        :returns augmented image
        """
        image = image.copy()
        self.img_height, self.img_width, _ = image.shape
        cutout_arr, cutout_size, cutout_pos = self._get_cutout(self.img_height, self.img_width)
        self.image = image
        self.cutout_pos = cutout_pos
        self.cutout_size = cutout_size
        image[cutout_pos.y:cutout_pos.y + cutout_size, cutout_pos.x:cutout_size + cutout_pos.x, :] = cutout_arr
        return image

    def apply_to_bbox(self, bbox, **params):
        """
        Removes the bounding boxes which are covered by the applied cutout
        :param bbox: A single bounding box coordinates in pascal_voc format
        :returns transformed bbox's coordinates
        """
        bbox = denormalize_bbox(bbox, self.img_height, self.img_width)
        x_min, y_min, x_max, y_max = tuple(map(int, bbox))
        bbox_size = (x_max - x_min) * (y_max - y_min)
        overlapping_size = np.sum((self.image[y_min:y_max, x_min:x_max, 0] == self.fill_value) & (self.image[y_min:y_max, x_min:x_max, 1] == self.fill_value) & (self.image[y_min:y_max, x_min:x_max, 2] == self.fill_value))
        if overlapping_size / bbox_size > self.bbox_removal_threshold:
            return normalize_bbox((0, 0, 0, 0), self.img_height, self.img_width)
        return normalize_bbox(bbox, self.img_height, self.img_width)

    def get_transform_init_args_names(self):
        """
        Fetches the parameter(s) of __init__ method
        :returns: tuple of parameter(s) of __init__ method
        """
        return 'fill_value'

augmentation = albumentations.Compose([CustomCutout(p=1), albumentations.Flip(always_apply=True), albumentations.OneOf([albumentations.Blur(p=0.5), albumentations.GaussNoise(var_limit=5.0 / 255.0, p=0.5)], p=1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})

def get_bbox(bboxes, col, color='white'):
    for i in range(len(bboxes)):
        rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

num_images = 5
rand_start = np.random.randint(0, len(image_ids) - 5)
fig, ax = plt.subplots(nrows=num_images, ncols=2, figsize=(16, 40))
for index, image_id in enumerate(image_ids[rand_start : rand_start + num_images]):
    # Read the image from image id
    image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
    image /= 255.0  # Normalize
    # Get the bboxes details and apply all the augmentations
    bboxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
    labels = np.ones((len(bboxes), ))  # As we have only one class (wheat heads)
    aug_result = augmentation(image=image, bboxes=bboxes, labels=labels)
    get_bbox(bboxes, ax[index][0], color='red')
    ax[index][0].grid(False)
    ax[index][0].set_xticks([])
    ax[index][0].set_yticks([])
    ax[index][0].title.set_text('Original Image')
    ax[index][0].imshow(image)
    get_bbox(aug_result['bboxes'], ax[index][1], color='red')
    ax[index][1].grid(False)
    ax[index][1].set_xticks([])
    ax[index][1].set_yticks([])
    ax[index][1].title.set_text(f'Augmented Image: Removed bboxes: {len(bboxes) - len(aug_result["bboxes"])}')
    ax[index][1].imshow(aug_result['image'])
plt.show()

augmentation = albumentations.Compose([CustomCutout(p=0.5), albumentations.Flip(always_apply=True), albumentations.OneOf([albumentations.Blur(p=0.5), albumentations.GaussNoise(var_limit=5.0 / 255.0, p=0.5)], p=1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
image_metadata = []
with zipfile.ZipFile('train.zip', 'w') as img_out:
    for index, image_id in tqdm(enumerate(image_ids), total=len(image_ids)):
        image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        bboxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
        labels = np.ones((len(bboxes),))
        aug_result = augmentation(image=image, bboxes=bboxes, labels=labels)
        aug_image = aug_result['image']
        aug_bboxes = aug_result['bboxes']
        img_out.writestr(f'{image_id}.jpg', image)
        img_out.writestr(f'{image_id}_aug.jpg', image)
        for bbox in aug_bboxes:
            bbox = tuple(map(int, bbox))
            image_metadata.append({'image_id': f'{image_id}_aug', 'x_min': bbox[0], 'y_min': bbox[1], 'x_max': bbox[2], 'y_max': bbox[3], 'width': bbox[2] - bbox[0], 'height': bbox[3] - bbox[1], 'area': (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])})
        break
code
34120972/cell_8
[ "text_html_output_1.png" ]
import ast
import cv2
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_id = 'c14c1e300'
image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
plt.figure(figsize=(10, 10))
plt.imshow(image)
plt.show()
code
34120972/cell_16
[ "text_html_output_1.png" ]
import albumentations
import ast
import cv2
import matplotlib.patches as patches
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
image_id = 'c14c1e300'
image = cv2.imread(os.path.join(BASE_DIR, 'train', f'{image_id}.jpg'), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
pascal_voc_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'x_max', 'y_max']].astype(np.int32).values
coco_boxes = train_df[train_df['image_id'] == image_id][['x_min', 'y_min', 'width', 'height']].astype(np.int32).values
assert len(pascal_voc_boxes) == len(coco_boxes)
labels = np.ones((len(pascal_voc_boxes),))

def get_bbox(bboxes, col, color='white', bbox_format='pascal_voc'):
    for i in range(len(bboxes)):
        if bbox_format == 'pascal_voc':
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2] - bboxes[i][0], bboxes[i][3] - bboxes[i][1], linewidth=2, edgecolor=color, facecolor='none')
        else:
            rect = patches.Rectangle((bboxes[i][0], bboxes[i][1]), bboxes[i][2], bboxes[i][3], linewidth=2, edgecolor=color, facecolor='none')
        col.add_patch(rect)

aug = albumentations.Compose([albumentations.Resize(512, 512), albumentations.VerticalFlip(1)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
aug_result = aug(image=image, bboxes=pascal_voc_boxes, labels=labels)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 16))
get_bbox(pascal_voc_boxes, ax[0], color='red')
ax[0].title.set_text('Original Image')
ax[0].imshow(image)
get_bbox(aug_result['bboxes'], ax[1], color='red')
ax[1].title.set_text('Augmented Image')
ax[1].imshow(aug_result['image'])
plt.show()
code
34120972/cell_5
[ "image_output_1.png" ]
import ast
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
BASE_DIR = '/kaggle/input/global-wheat-detection'
WORK_DIR = '/kaggle/working'
np.random.seed(1996)
train_df = pd.read_csv(os.path.join(BASE_DIR, 'train.csv'))
train_df[['x_min', 'y_min', 'width', 'height']] = pd.DataFrame([ast.literal_eval(x) for x in train_df.bbox.tolist()], index=train_df.index)
train_df = train_df[['image_id', 'bbox', 'source', 'x_min', 'y_min', 'width', 'height']]
train_df['area'] = train_df['width'] * train_df['height']
train_df['x_max'] = train_df['x_min'] + train_df['width']
train_df['y_max'] = train_df['y_min'] + train_df['height']
train_df = train_df.drop(['bbox', 'source'], axis=1)
train_df = train_df[['image_id', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height', 'area']]
train_df = train_df[train_df['area'] < 100000]
print(train_df.shape)
code
90118148/cell_4
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error import pandas as pd train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time']) train['hour'] = train['time'].dt.hour train['minute'] = train['time'].dt.minute submission_in = pd.read_csv('../input/tabular-playground-march-2022-04/lightautoml_rounded_special_ve_37_v2.csv') sep = train[(train.time.dt.hour >= 12) & (train.time.dt.weekday < 5) & (train.time.dt.dayofyear >= 246)] lower = sep.groupby(['hour', 'minute', 'x', 'y', 'direction']).congestion.quantile(0.2).values upper = sep.groupby(['hour', 'minute', 'x', 'y', 'direction']).congestion.quantile(0.65).values submission_out = submission_in.copy() submission_out['congestion'] = submission_in.congestion.clip(lower, upper) submission_out.to_csv('submission.csv', index=False) mae = mean_absolute_error(submission_in.congestion, submission_out.congestion) submission_out submission_out['congestion'] = submission_out.congestion.round().astype(int) submission_out.to_csv('submission_rounded.csv', index=False) submission_out
code
90118148/cell_3
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error import pandas as pd train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time']) train['hour'] = train['time'].dt.hour train['minute'] = train['time'].dt.minute submission_in = pd.read_csv('../input/tabular-playground-march-2022-04/lightautoml_rounded_special_ve_37_v2.csv') sep = train[(train.time.dt.hour >= 12) & (train.time.dt.weekday < 5) & (train.time.dt.dayofyear >= 246)] lower = sep.groupby(['hour', 'minute', 'x', 'y', 'direction']).congestion.quantile(0.2).values upper = sep.groupby(['hour', 'minute', 'x', 'y', 'direction']).congestion.quantile(0.65).values submission_out = submission_in.copy() submission_out['congestion'] = submission_in.congestion.clip(lower, upper) submission_out.to_csv('submission.csv', index=False) mae = mean_absolute_error(submission_in.congestion, submission_out.congestion) print(f'Mean absolute modification: {mae:.4f}') print(f'Submission was below lower bound: {(submission_in.congestion < lower - 0.5).sum()}') print(f'Submission was above upper bound: {(submission_in.congestion > upper + 0.5).sum()}') submission_out
code
73090666/cell_4
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
categorical = [col for col in train.columns if train[col].dtype == 'object']
encoder = LabelEncoder()
for column in categorical:
    train[column] = encoder.fit_transform(train[column])
    test[column] = encoder.transform(test[column])
X = train.drop(['id', 'target'], axis=1)
y = train['target']
X_valid = test.drop(['id'], axis=1)
X.head()
code
73090666/cell_6
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error
from xgboost import XGBRegressor
model = XGBRegressor(n_estimators=1000, learning_rate=0.05, gamma=0.2)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_test, y_test)], verbose=False)
y_predict = model.predict(X_test)
print('MSE', mean_squared_error(y_test, y_predict, squared=False))
print('MAE', mean_absolute_error(y_test, y_predict))
code
73090666/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
train.head()
code
73090666/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73090666/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
categorical = [col for col in train.columns if train[col].dtype == 'object']
encoder = LabelEncoder()
for column in categorical:
    train[column] = encoder.fit_transform(train[column])
    test[column] = encoder.transform(test[column])
X = train.drop(['id', 'target'], axis=1)
y = train['target']
X_valid = test.drop(['id'], axis=1)
model = XGBRegressor(n_estimators=1000, learning_rate=0.05, gamma=0.2)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_test, y_test)], verbose=False)
y_predict = model.predict(X_test)
predictions = model.predict(X_valid)
sample_sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
submission = pd.DataFrame({'Id': sample_sub['id'], 'target': predictions.astype(float)})
submission.to_csv('./my_submission.csv', index=False)
print('Your submission was successfully saved!')
code
73090666/cell_3
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
categorical = [col for col in train.columns if train[col].dtype == 'object']
encoder = LabelEncoder()
for column in categorical:
    train[column] = encoder.fit_transform(train[column])
    test[column] = encoder.transform(test[column])
train.head()
code
18116693/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
Vertical1 = vocab.groupby('Vertical1')
vocab.dtypes
code
18116693/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
sample_sub = pd.read_csv('../input/sample_submission.csv')
print(sample_sub.head())
code
18116693/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
print('Total number of names :', vocab['Name'].nunique())
code
18116693/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
Vertical1 = vocab.groupby('Vertical1')
vocab.dtypes
text = ' '.join((WD for WD in vocab.WikiDescription))
print('There are {} words in the combination of all review.'.format(len(text)))
code
18116693/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
18116693/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
Vertical1 = vocab.groupby('Vertical1')
print(Vertical1.describe().head())
code
18116693/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
Vertical1 = vocab.groupby('Vertical1')
print(vocab.WikiDescription.head(10))
code
18116693/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
print(vocab.head())
code
18116693/cell_12
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
vocab = pd.read_csv('../input/vocabulary.csv')
Vertical1 = vocab.groupby('Vertical1')
vocab.dtypes
text = ' '.join((WD for WD in vocab.WikiDescription))
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
wordcloud = WordCloud(max_words=500, background_color='white').generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
18116693/cell_5
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input/frame-sample/frame/'))
code
17120711/cell_6
[ "text_plain_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials
from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from keras.models import Sequential, model_from_json
from sklearn.preprocessing import MinMaxScaler
import keras
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from sklearn.preprocessing import MinMaxScaler
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials
from keras import backend as K
K.image_data_format()

def plot_history(history):
    pass

def get_data():
    Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0']
    ytrain = np.load('../input/k49-train-labels.npz')['arr_0']
    Xtest = np.load('../input/k49-test-imgs.npz')['arr_0']
    ytest = np.load('../input/k49-test-labels.npz')['arr_0']
    train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49)
    test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49)
    n_train = ytrain.shape[0]
    n_test = ytest.shape[0]
    npix = Xtrain.shape[1]
    Xtrain1 = Xtrain.reshape(n_train, -1)
    Xtest1 = Xtest.reshape(n_test, -1)
    scaler = MinMaxScaler()
    Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32')
    Xtest1 = scaler.fit_transform(Xtest1).astype('float32')
    if K.image_data_format() == 'channels_last':
        Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255
        Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255
        input_shape = (npix, npix, 1)
    return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape)

def objective(args):
    kernelsize = args['ksize']
    poolsize = args['psize']
    stridesize = args['ssize']
    model3 = Sequential()
    model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape))
    model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu'))
    model3.add(MaxPooling2D(pool_size=poolsize))
    model3.add(BatchNormalization())
    model3.add(Dropout(0.25))
    model3.add(Flatten())
    model3.add(BatchNormalization())
    model3.add(Dense(128, activation='relu'))
    model3.add(BatchNormalization())
    model3.add(Dense(49, activation='softmax'))
    model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
    history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0)
    return_object = {}
    return_object['status'] = STATUS_OK
    return_object['loss'] = history3.history['loss'][0]
    return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3}
    return return_object

space = hp.choice('a', [{'ksize': hp.choice('ksize_val', [2, 3, 4]), 'psize': hp.choice('psize_val', [2, 3, 4]), 'ssize': hp.choice('stridesize_val', [1, 2])}])
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=trials)
code
17120711/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from sklearn.preprocessing import MinMaxScaler
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials
from keras import backend as K
K.image_data_format()
code
17120711/cell_7
[ "image_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from keras.models import Sequential, model_from_json from sklearn.preprocessing import MinMaxScaler import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import keras from keras.models import Sequential, model_from_json from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from sklearn.preprocessing import MinMaxScaler from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras import backend as K K.image_data_format() def plot_history(history): pass def get_data(): Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0'] ytrain = np.load('../input/k49-train-labels.npz')['arr_0'] Xtest = np.load('../input/k49-test-imgs.npz')['arr_0'] ytest = np.load('../input/k49-test-labels.npz')['arr_0'] train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49) test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49) n_train = ytrain.shape[0] n_test = ytest.shape[0] npix = Xtrain.shape[1] Xtrain1 = Xtrain.reshape(n_train, -1) Xtest1 = Xtest.reshape(n_test, -1) scaler = MinMaxScaler() Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32') Xtest1 = scaler.fit_transform(Xtest1).astype('float32') if K.image_data_format() == 'channels_last': Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255 Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255 input_shape = (npix, npix, 1) return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape) def objective(args): kernelsize = args['ksize'] poolsize = args['psize'] stridesize = args['ssize'] model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0) return_object = {} return_object['status'] = STATUS_OK return_object['loss'] = history3.history['loss'][0] return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3} return return_object space = hp.choice('a', [{'ksize': hp.choice('ksize_val', [2, 3, 4]), 'psize': hp.choice('psize_val', [2, 3, 4]), 'ssize': hp.choice('stridesize_val', [1, 2])}]) trials = Trials() best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=trials) print(best) trials.results
code
17120711/cell_18
[ "text_plain_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from keras.models import Sequential, model_from_json from sklearn.preprocessing import MinMaxScaler import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import keras from keras.models import Sequential, model_from_json from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from sklearn.preprocessing import MinMaxScaler from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras import backend as K K.image_data_format() def plot_history(history): pass def get_data(): Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0'] ytrain = np.load('../input/k49-train-labels.npz')['arr_0'] Xtest = np.load('../input/k49-test-imgs.npz')['arr_0'] ytest = np.load('../input/k49-test-labels.npz')['arr_0'] train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49) test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49) n_train = ytrain.shape[0] n_test = ytest.shape[0] npix = Xtrain.shape[1] Xtrain1 = Xtrain.reshape(n_train, -1) Xtest1 = Xtest.reshape(n_test, -1) scaler = MinMaxScaler() Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32') Xtest1 = scaler.fit_transform(Xtest1).astype('float32') if K.image_data_format() == 'channels_last': Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255 Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255 input_shape = (npix, npix, 1) return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape) def objective(args): kernelsize = args['ksize'] poolsize = args['psize'] stridesize = args['ssize'] model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0) return_object = {} return_object['status'] = STATUS_OK return_object['loss'] = history3.history['loss'][0] return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3} return return_object kernelsize = 4 poolsize = 2 stridesize = 1 model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, 
optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=100, batch_size=256, validation_data=(Xtest, ytest), verbose=0) model4 = Sequential() model4.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Dropout(0.5)) model4.add(Flatten()) model4.add(BatchNormalization()) model4.add(Dense(128, activation='relu')) model4.add(BatchNormalization()) model4.add(Dense(49, activation='softmax')) model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history4 = model4.fit(Xtrain, ytrain, epochs=50, batch_size=256, validation_data=(Xtest, ytest), verbose=1) model4 = Sequential() model4.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Dropout(0.25)) model4.add(Flatten()) model4.add(BatchNormalization()) model4.add(Dense(128, activation='relu')) model4.add(BatchNormalization()) model4.add(Dense(49, activation='softmax')) model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history4 = model4.fit(Xtrain, ytrain, epochs=50, batch_size=256, validation_data=(Xtest, ytest), verbose=1) plot_history(history4)
code
17120711/cell_16
[ "image_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from keras.models import Sequential, model_from_json from sklearn.preprocessing import MinMaxScaler import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import keras from keras.models import Sequential, model_from_json from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from sklearn.preprocessing import MinMaxScaler from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras import backend as K K.image_data_format() def plot_history(history): pass def get_data(): Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0'] ytrain = np.load('../input/k49-train-labels.npz')['arr_0'] Xtest = np.load('../input/k49-test-imgs.npz')['arr_0'] ytest = np.load('../input/k49-test-labels.npz')['arr_0'] train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49) test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49) n_train = ytrain.shape[0] n_test = ytest.shape[0] npix = Xtrain.shape[1] Xtrain1 = Xtrain.reshape(n_train, -1) Xtest1 = Xtest.reshape(n_test, -1) scaler = MinMaxScaler() Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32') Xtest1 = scaler.fit_transform(Xtest1).astype('float32') if K.image_data_format() == 'channels_last': Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255 Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255 input_shape = (npix, npix, 1) return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape) def objective(args): kernelsize = args['ksize'] poolsize = args['psize'] stridesize = args['ssize'] model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0) return_object = {} return_object['status'] = STATUS_OK return_object['loss'] = history3.history['loss'][0] return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3} return return_object kernelsize = 4 poolsize = 2 stridesize = 1 model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, 
optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=100, batch_size=256, validation_data=(Xtest, ytest), verbose=0) model4 = Sequential() model4.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Dropout(0.5)) model4.add(Flatten()) model4.add(BatchNormalization()) model4.add(Dense(128, activation='relu')) model4.add(BatchNormalization()) model4.add(Dense(49, activation='softmax')) model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history4 = model4.fit(Xtrain, ytrain, epochs=50, batch_size=256, validation_data=(Xtest, ytest), verbose=1) model4 = Sequential() model4.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Dropout(0.25)) model4.add(Flatten()) model4.add(BatchNormalization()) model4.add(Dense(128, activation='relu')) model4.add(BatchNormalization()) model4.add(Dense(49, activation='softmax')) model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history4 = model4.fit(Xtrain, ytrain, epochs=50, batch_size=256, validation_data=(Xtest, ytest), verbose=1)
code
17120711/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from keras.models import Sequential, model_from_json from sklearn.preprocessing import MinMaxScaler import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import keras from keras.models import Sequential, model_from_json from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from sklearn.preprocessing import MinMaxScaler from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras import backend as K K.image_data_format() def plot_history(history): pass def get_data(): Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0'] ytrain = np.load('../input/k49-train-labels.npz')['arr_0'] Xtest = np.load('../input/k49-test-imgs.npz')['arr_0'] ytest = np.load('../input/k49-test-labels.npz')['arr_0'] train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49) test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49) n_train = ytrain.shape[0] n_test = ytest.shape[0] npix = Xtrain.shape[1] Xtrain1 = Xtrain.reshape(n_train, -1) Xtest1 = Xtest.reshape(n_test, -1) scaler = MinMaxScaler() Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32') Xtest1 = scaler.fit_transform(Xtest1).astype('float32') if K.image_data_format() == 'channels_last': Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255 Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255 input_shape = (npix, npix, 1) return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape) def objective(args): kernelsize = args['ksize'] poolsize = args['psize'] stridesize = args['ssize'] model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0) return_object = {} return_object['status'] = STATUS_OK return_object['loss'] = history3.history['loss'][0] return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3} return return_object kernelsize = 4 poolsize = 2 stridesize = 1 model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, 
optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=100, batch_size=256, validation_data=(Xtest, ytest), verbose=0) model4 = Sequential() model4.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model4.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model4.add(MaxPooling2D(pool_size=poolsize)) model4.add(BatchNormalization()) model4.add(Dropout(0.5)) model4.add(Flatten()) model4.add(BatchNormalization()) model4.add(Dense(128, activation='relu')) model4.add(BatchNormalization()) model4.add(Dense(49, activation='softmax')) model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history4 = model4.fit(Xtrain, ytrain, epochs=50, batch_size=256, validation_data=(Xtest, ytest), verbose=1)
code
17120711/cell_12
[ "text_plain_output_1.png" ]
from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from keras.models import Sequential, model_from_json from sklearn.preprocessing import MinMaxScaler import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import keras from keras.models import Sequential, model_from_json from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization from sklearn.preprocessing import MinMaxScaler from hyperopt import hp, fmin, tpe, space_eval, STATUS_OK, Trials from keras import backend as K K.image_data_format() def plot_history(history): pass def get_data(): Xtrain = np.load('../input/k49-train-imgs.npz')['arr_0'] ytrain = np.load('../input/k49-train-labels.npz')['arr_0'] Xtest = np.load('../input/k49-test-imgs.npz')['arr_0'] ytest = np.load('../input/k49-test-labels.npz')['arr_0'] train_one_hot_labels = keras.utils.to_categorical(ytrain, num_classes=49) test_one_hot_labels = keras.utils.to_categorical(ytest, num_classes=49) n_train = ytrain.shape[0] n_test = ytest.shape[0] npix = Xtrain.shape[1] Xtrain1 = Xtrain.reshape(n_train, -1) Xtest1 = Xtest.reshape(n_test, -1) scaler = MinMaxScaler() Xtrain1 = scaler.fit_transform(Xtrain1).astype('float32') Xtest1 = scaler.fit_transform(Xtest1).astype('float32') if K.image_data_format() == 'channels_last': Xtrain2d = Xtrain.reshape(n_train, npix, npix, 1).astype('float32') / 255 Xtest2d = Xtest.reshape(n_test, npix, npix, 1).astype('float32') / 255 input_shape = (npix, npix, 1) return (Xtrain2d, train_one_hot_labels, Xtest2d, test_one_hot_labels, input_shape) def objective(args): kernelsize = args['ksize'] poolsize = args['psize'] stridesize = args['ssize'] model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=25, batch_size=256, validation_data=(Xtest, ytest), verbose=0) return_object = {} return_object['status'] = STATUS_OK return_object['loss'] = history3.history['loss'][0] return_object['deets'] = {'loss': history3.history['loss'][0], 'val_loss': history3.history['val_loss'][0], 'acc': history3.history['acc'][0], 'val_acc': history3.history['val_acc'][0], 'args': args, 'model': model3} return return_object kernelsize = 4 poolsize = 2 stridesize = 1 model3 = Sequential() model3.add(Conv2D(32, kernel_size=kernelsize, strides=stridesize, activation='relu', input_shape=input_shape)) model3.add(Conv2D(64, kernel_size=kernelsize, strides=stridesize, activation='relu')) model3.add(MaxPooling2D(pool_size=poolsize)) model3.add(BatchNormalization()) model3.add(Dropout(0.25)) model3.add(Flatten()) model3.add(BatchNormalization()) model3.add(Dense(128, activation='relu')) model3.add(BatchNormalization()) model3.add(Dense(49, activation='softmax')) model3.compile(loss=keras.losses.categorical_crossentropy, 
optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) history3 = model3.fit(Xtrain, ytrain, epochs=100, batch_size=256, validation_data=(Xtest, ytest), verbose=0) plot_history(history3)
code
73061688/cell_21
[ "text_plain_output_1.png" ]
from keras.applications import InceptionV3
IncV3 = InceptionV3(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
code
73061688/cell_13
[ "image_output_2.png", "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import matplotlib.pyplot as plt
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
img = load_img(train_path + '/ANHINGA/001.jpg')
plt.imshow(img)
plt.axis('off')
plt.title('Sample Anhinga Image')
plt.show()
plt.figure()
img = load_img(train_path + '/BALD EAGLE/018.jpg')
plt.imshow(img)
plt.axis('off')
plt.title('Sample Bald Eagle Image')
plt.show()
code
73061688/cell_26
[ "text_plain_output_1.png" ]
from glob import glob
from keras.applications import InceptionV3
from keras.layers import Dense, Flatten, BatchNormalization, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
className = glob(train_path + '/*')
NumberofClass = len(className)
train_datagen = ImageDataGenerator(rescale=1 / 255)
validation_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
batch_size = 256
train_datagen = ImageDataGenerator(rescale=1 / 255, shear_range=0.3, horizontal_flip=True, zoom_range=0.3)
val_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
val_generator = val_datagen.flow_from_directory(validation_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
IncV3 = InceptionV3(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
model = Sequential()
model.add(IncV3)
for layer in model.layers:
    layer.trainable = False
model.add(Flatten())
model.add(Dense(units=2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=NumberofClass, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_generator, validation_data=val_generator, epochs=10, batch_size=batch_size)
code
73061688/cell_19
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
train_datagen = ImageDataGenerator(rescale=1 / 255)
validation_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
batch_size = 256
train_datagen = ImageDataGenerator(rescale=1 / 255, shear_range=0.3, horizontal_flip=True, zoom_range=0.3)
val_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
val_generator = val_datagen.flow_from_directory(validation_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
code
73061688/cell_28
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from glob import glob
from keras.applications import InceptionV3
from keras.layers import Dense, Flatten, BatchNormalization, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import matplotlib.pyplot as plt
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
img = load_img(train_path + '/ANHINGA/001.jpg')
plt.axis('off')
img = load_img(train_path + '/BALD EAGLE/018.jpg')
plt.axis('off')
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs[0,0].imshow(load_img(train_path + "/BARN OWL/018.jpg"))
axs[0,0].axis("off")
axs[0,1].imshow(load_img(train_path + "/ALBATROSS/001.jpg"))
axs[0,1].axis("off")
axs[1,0].imshow(load_img(train_path + "/CANARY/107.jpg"))
axs[1,0].axis("off")
axs[1,1].imshow(load_img(train_path + "/CROW/100.jpg"))
axs[1,1].axis("off")
plt.show()
className = glob(train_path + '/*')
NumberofClass = len(className)
train_datagen = ImageDataGenerator(rescale=1 / 255)
validation_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
batch_size = 256
train_datagen = ImageDataGenerator(rescale=1 / 255, shear_range=0.3, horizontal_flip=True, zoom_range=0.3)
val_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
val_generator = val_datagen.flow_from_directory(validation_path, target_size=(224, 224), batch_size=batch_size, color_mode='rgb', class_mode='categorical')
IncV3 = InceptionV3(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
model = Sequential()
model.add(IncV3)
for layer in model.layers:
    layer.trainable = False
model.add(Flatten())
model.add(Dense(units=2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=NumberofClass, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_generator, validation_data=val_generator, epochs=10, batch_size=batch_size)
plt.figure(figsize=(8, 8))
plt.plot(history.history['loss'], color='b', label='Training loss')
plt.plot(history.history['val_loss'], color='r', label='Validation loss')
plt.legend()
plt.show()
plt.figure()
plt.figure(figsize=(8, 8))
plt.plot(history.history['accuracy'], color='b', label='Training accuracy')
plt.plot(history.history['val_accuracy'], color='r', label='Validation accuracy')
plt.legend()
plt.show()
code
73061688/cell_16
[ "text_plain_output_1.png" ]
from glob import glob
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
className = glob(train_path + '/*')
NumberofClass = len(className)
print('NumberofClass:', NumberofClass)
code
73061688/cell_14
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import matplotlib.pyplot as plt
train_path = '../input/100-bird-species/285 birds/train'
test_path = '../input/100-bird-species/285 birds/test'
validation_path = '../input/100-bird-species/285 birds/valid'
img = load_img(train_path + '/ANHINGA/001.jpg')
plt.axis('off')
img = load_img(train_path + '/BALD EAGLE/018.jpg')
plt.axis('off')
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs[0, 0].imshow(load_img(train_path + '/BARN OWL/018.jpg'))
axs[0, 0].axis('off')
axs[0, 1].imshow(load_img(train_path + '/ALBATROSS/001.jpg'))
axs[0, 1].axis('off')
axs[1, 0].imshow(load_img(train_path + '/CANARY/107.jpg'))
axs[1, 0].axis('off')
axs[1, 1].imshow(load_img(train_path + '/CROW/100.jpg'))
axs[1, 1].axis('off')
plt.show()
code
105209340/cell_1
[ "text_plain_output_1.png" ]
import gc
import os
import cv2
import zipfile
import rasterio
import numpy as np
import pandas as pd
from PIL import Image
import tifffile
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
from rasterio.windows import Window
from torch.utils.data import Dataset
!pip install staintools
!pip install spams
code
105209340/cell_8
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import staintools
imaging_measurements = {'hpa': {'pixel_size': {'kidney': 0.4, 'prostate': 0.4, 'largeintestine': 0.4, 'spleen': 0.4, 'lung': 0.4}, 'tissue_thickness': {'kidney': 4, 'prostate': 4, 'largeintestine': 4, 'spleen': 4, 'lung': 4}}, 'hubmap': {'pixel_size': {'kidney': 0.5, 'prostate': 6.263, 'largeintestine': 0.229, 'spleen': 0.4945, 'lung': 0.7562}, 'tissue_thickness': {'kidney': 10, 'prostate': 5, 'largeintestine': 8, 'spleen': 4, 'lung': 5}}}
import staintools
target = cv2.imread('/kaggle/input/hubmap-organ-segmentation/test_images/10078.tiff')
target = staintools.LuminosityStandardizer.standardize(target)
normalizer = staintools.StainNormalizer(method='vahadane')
normalizer.fit(target)
OUT_TRAIN = './hubmap_images'
os.makedirs(OUT_TRAIN, exist_ok=False)
MASKS = '../input/hubmap-organ-segmentation/train.csv'
DATA = '../input/hubmap-organ-segmentation/train_images'
def augment_image(image, domain_pixel_size, target_pixel_size, domain_tissue_thickness, target_tissue_thickness, alpha=0.15):
    """
    Visualize raw and augmented images
    Parameters
    ----------
    image (numpy.ndarray of shape (height, width, 3)): Image array
    domain_pixel_size (float): Pixel size of the domain images in micrometers
    target_pixel_size (float): Pixel size of the target images in micrometers
    domain_tissue_thickness (float): Tissue thickness of the domain images in micrometers
    target_tissue_thickness (float): Tissue thickness of the target images in micrometers
    alpha (float): Multiplier to control saturation and value scale
    """
    tissue_thickness_scale_factor = target_tissue_thickness - domain_tissue_thickness
    image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype(np.float32)
    image_hsv[:, :, 1] *= 1 + alpha * tissue_thickness_scale_factor
    image_hsv[:, :, 2] *= 1 - alpha * tissue_thickness_scale_factor
    image_hsv = image_hsv.astype(np.uint8)
    image_scaled = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2RGB)
    image_scaled = staintools.LuminosityStandardizer.standardize(image_scaled)
    pixel_size_scale_factor = domain_pixel_size / target_pixel_size
    image_resized = cv2.resize(image_scaled, dsize=None, fx=pixel_size_scale_factor, fy=pixel_size_scale_factor, interpolation=cv2.INTER_CUBIC)
    image_resized = cv2.resize(image_resized, dsize=(image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC)
    image = staintools.LuminosityStandardizer.standardize(image)
    image_augmented = staintools.LuminosityStandardizer.standardize(image_resized)
    image_augmented = normalizer.transform(image_augmented)
    return (image, image_augmented)
df_train = pd.read_csv(MASKS)
df_masks = df_train.reset_index(drop=True)
for idx, row in df_masks.iterrows():
    image = cv2.imread(os.path.join(DATA, str(row['id']) + '.tiff'))
    image, image_augmented = augment_image(image=image, domain_pixel_size=imaging_measurements['hpa']['pixel_size'][row['organ']], target_pixel_size=imaging_measurements['hubmap']['pixel_size'][row['organ']], domain_tissue_thickness=imaging_measurements['hpa']['tissue_thickness'][row['organ']], target_tissue_thickness=imaging_measurements['hubmap']['tissue_thickness'][row['organ']], alpha=0.15)
    rimage = cv2.resize(image_augmented, (1024, 1024), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(os.path.join(OUT_TRAIN, str(row['id']) + '.png'), rimage)
def visualize_augmentation(image, image_augmented, metadata, path=None):
    """
    Visualize raw and augmented images
    Parameters
    ----------
    image (numpy.ndarray of shape (height, width, 3)): Raw image array
    image_augmented (numpy.ndarray of shape (height, width, 3)): Augmented image array
    metadata (dict): Dictionary of image metadata
    path (path-like str or None): Path of the output file or None (if path is None, plot is displayed with selected backend)
    """
    fig, axes = plt.subplots(figsize=(36, 20), ncols=2)
    axes[0].imshow(image)
    axes[1].imshow(image_augmented)
    for i in range(2):
        axes[i].set_xlabel('')
        axes[i].set_ylabel('')
        axes[i].tick_params(axis='x', labelsize=15, pad=10)
        axes[i].tick_params(axis='y', labelsize=15, pad=10)
    axes[0].set_title(f'Raw Image\nMean: {np.mean(image):.4f} - Std: {np.std(image):.4f} - Min: {np.min(image)} - Max: {np.max(image)}', size=25, pad=15)
    axes[1].set_title(f'Augmented Image\nMean: {np.mean(image_augmented):.4f} - Std: {np.std(image_augmented):.4f} - Min: {np.min(image_augmented)} - Max: {np.max(image_augmented)}', size=25, pad=15)
    fig.suptitle(
        f'''
        Image ID {metadata["id"]} - {metadata["organ"]} - {metadata["data_source"]} - {metadata["age"]} - {metadata["sex"]}
        Raw Image Shape: {metadata["img_height"]}x{metadata["img_width"]} - Pixel Size: {metadata["pixel_size"]}µm - Tissue Thickness: {metadata["tissue_thickness"]}µm
        Augmented Image Shape: {metadata["img_height"]}x{metadata["img_width"]} - Pixel Size: {imaging_measurements["hubmap"]["pixel_size"][metadata["organ"]]}µm - Tissue Thickness: {imaging_measurements["hubmap"]["tissue_thickness"][metadata["organ"]]}µm
        ''',
        fontsize=50,
        y=1.05
    )
    if path is None:
        plt.show()
    else:
        plt.savefig(path)
        plt.close(fig)
visualize_augmentation(image=image, image_augmented=image_augmented, metadata=row.to_dict())
code
72120119/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df1.head()
code
72120119/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.info()
code
72120119/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.head()
code
72120119/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train.head()
code
72120119/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df1['month'].value_counts().plot(kind='bar')
code
72120119/cell_33
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_20.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df_new = df1.drop('date', axis=1)
df_new.columns
features = df_new[['date_block_num', 'item_price', 'item_cnt_day', 'year', 'month']]
df_new.isnull().sum()
for i in df_new.select_dtypes(include='number').columns:
    sns.boxplot(df_new[i])
    plt.show()
code
72120119/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df_new = df1.drop('date', axis=1)
df_new.columns
code
72120119/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
code
72120119/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72120119/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df_new = df1.drop('date', axis=1)
df_new.columns
df_new.isnull().sum()
code
72120119/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.describe(include='object')
code
72120119/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
code
72120119/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df_new = df1.drop('date', axis=1)
df_new.columns
features = df_new[['date_block_num', 'item_price', 'item_cnt_day', 'year', 'month']]
for i in features.columns:
    for j in features.columns:
        if i != j:
            sns.scatterplot(x=df_new[i], y=df_new[j])
            plt.show()
code
72120119/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.describe()
code
72120119/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items_cat = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
df_items = pd.merge(left=items, right=items_cat, on='item_category_id')
df_shops = pd.merge(left=train, right=shops, on='shop_id')
df = pd.merge(left=df_shops, right=df_items, on='item_id')
df.shape
df.nunique()
df.columns
df1 = df.drop(['shop_id', 'item_id', 'item_category_id'], axis=1)
df1['year'].value_counts().plot(kind='bar')
code