Dataset schema (one record per notebook cell):
  path              string        (length 13 to 17)
  screenshot_names  sequence      (length 1 to 873)
  code              string        (length 0 to 40.4k)
  cell_type         string class  (1 distinct value)
128048094/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
print('Training accuracy: ', model_logistic.score(X_train, y_train))
print('Testing accuracy: ', model_logistic.score(X_test, y_test))
code
128048094/cell_40
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
print(classification_report(y_test, model_knn.predict(X_test)))
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
ConfusionMatrixDisplay(cm_knn).plot()
plt.show()
code
128048094/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
print(classification_report(y_test, model_tree.predict(X_test)))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
ConfusionMatrixDisplay(cm_tree).plot()
plt.show()
code
128048094/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
print(classification_report(y_test, model_tree.predict(X_test)))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
ConfusionMatrixDisplay(cm_tree).plot()
plt.show()
code
128048094/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
data_encoded.head(10)
code
128048094/cell_18
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
print('Training accuracy: ', model_tree.score(X_train, y_train))
print('Testing accuracy: ', model_tree.score(X_test, y_test))
code
128048094/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
print(classification_report(y_test, model_logistic.predict(X_test)))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
ConfusionMatrixDisplay(cm_logistic).plot()
plt.show()
code
128048094/cell_16
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
print('Training accuracy: ', model_knn.score(X_train, y_train))
print('Testing accuracy: ', model_knn.score(X_test, y_test))
code
128048094/cell_38
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
print('Training accuracy: ', model_logistic.score(X_train, y_train))
print('Testing accuracy: ', model_logistic.score(X_test, y_test))
code
128048094/cell_17
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.svm import LinearSVC
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
print('Training accuracy: ', model_svm.score(X_train, y_train))
print('Testing accuracy: ', model_svm.score(X_test, y_test))
code
128048094/cell_35
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
print('Training accuracy: ', model_svm.score(X_train, y_train))
print('Testing accuracy: ', model_svm.score(X_test, y_test))
code
128048094/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
print(classification_report(y_test, model_svm.predict(X_test)))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
ConfusionMatrixDisplay(cm_svm).plot()
plt.show()
code
128048094/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
print(y_pred)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
code
128048094/cell_24
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
print(classification_report(y_test, model_knn.predict(X_test)))
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
ConfusionMatrixDisplay(cm_knn).plot()
plt.show()
code
128048094/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
print(classification_report(y_test, model_svm.predict(X_test)))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
ConfusionMatrixDisplay(cm_svm).plot()
plt.show()
code
128048094/cell_37
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
print('Training accuracy: ', model_tree.score(X_train, y_train))
print('Testing accuracy: ', model_tree.score(X_test, y_test))
code
128048094/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.head(10)
code
128048094/cell_36
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data = data.dropna()
data_encoded = pd.get_dummies(data, columns=['Property_Area'])
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
model_svm = LinearSVC()
model_svm.fit(X_train, y_train)
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)
model_logistic = LogisticRegression()
model_logistic.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
cm_knn = confusion_matrix(y_test, model_knn.predict(X_test))
cm_tree = confusion_matrix(y_test, model_tree.predict(X_test))
cm_svm = confusion_matrix(y_test, model_svm.predict(X_test))
cm_logistic = confusion_matrix(y_test, model_logistic.predict(X_test))
y_pred = model_logistic.predict(X_test)
arr = np.array(y_pred)
compare_pre_act = pd.DataFrame(arr, columns=['Prediction'])
y_true = y_test.values
flatten_y_true = y_true.flatten()
df_pred = pd.DataFrame({'y_true': flatten_y_true, 'y_pred': y_pred})
df_pred
model_knn = KNeighborsClassifier()
model_knn.fit(X_train, y_train)
print('Training accuracy: ', model_knn.score(X_train, y_train))
print('Testing accuracy: ', model_knn.score(X_test, y_test))
code
17098311/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data = pd.concat([train_data, x, y], axis=1)
train_data = train_data.drop(['DOJ', 'ProjectsWorkedOn'], axis=1)
train_data.head()
code
17098311/cell_4
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
code
17098311/cell_6
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
test_data.dtypes
test_data.info()
code
17098311/cell_11
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data.head()
code
17098311/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
17098311/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
print(train_data.shape)
print(test_data.shape)
train_data.head()
code
17098311/cell_8
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_profiling as pp

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
import pandas_profiling as pp
pp.ProfileReport(train_data)
code
17098311/cell_15
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data = pd.concat([train_data, x, y], axis=1)
train_data = train_data.drop(['DOJ', 'ProjectsWorkedOn'], axis=1)
train_data = train_data.drop(['p4', 'p5', 'p6', 'p7', 'MONTH', 'DAY'], axis=1)
corr = train_data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
code
17098311/cell_17
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data = pd.concat([train_data, x, y], axis=1)
train_data = train_data.drop(['DOJ', 'ProjectsWorkedOn'], axis=1)
train_data = train_data.drop(['p4', 'p5', 'p6', 'p7', 'MONTH', 'DAY'], axis=1)
corr = train_data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
corr_matrix = train_data.corr().abs()
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
to_drop = [column for column in upper.columns if any(upper[column] > 0.99)]
train_data.drop(axis=1, columns=to_drop, inplace=True)
train_data.dtypes
code
17098311/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data = pd.concat([train_data, x, y], axis=1)
train_data = train_data.drop(['DOJ', 'ProjectsWorkedOn'], axis=1)
train_data = train_data.drop(['p4', 'p5', 'p6', 'p7', 'MONTH', 'DAY'], axis=1)
train_data.head()
code
17098311/cell_10
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
train_data.head()
code
17098311/cell_12
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
train_data.dtypes
test_data.dtypes
train_data = train_data.drop(['HadGrievance', 'Promoted_InLast3Yrs', 'EmployeeID'], axis=1)
x = train_data['ProjectsWorkedOn'].str.split(',', expand=True)
x.columns = ('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7')
y = train_data['DOJ'].str.split('-', expand=True)
y.columns = ('YEAR', 'MONTH', 'DAY')
train_data = pd.concat([train_data, x, y], axis=1)
train_data.head()
code
17098311/cell_5
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error,mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__, ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']), ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']), ''.join([str(round(mae_test, 3)), '(', str(round(mae_test, 3)), ')'])]], columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

train_data = pd.read_csv('../input/Train-1555063579947.csv')
test_data = pd.read_csv('../input/Test-1555063594850.csv')
test_data.dtypes
code
50237900/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score

patient_kfold_df = patient_kfold_df.reset_index()
train_gt_df = train_gt_df.merge(patient_kfold_df[['PatientID', 'fold']], on='PatientID', how='left')
patient_kfold_df[['PatientID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds_by_patient_id.csv', index=False)
train_gt_df[['StudyInstanceUID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds.csv', index=False)
patients_per_fold = train_gt_df[['fold', 'PatientID']].groupby('fold').agg({'PatientID': 'nunique'}).rename(columns={'PatientID': 'Num. patients'})
patients_per_fold
code
50237900/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score

patient_kfold_df = patient_kfold_df.reset_index()
train_gt_df = train_gt_df.merge(patient_kfold_df[['PatientID', 'fold']], on='PatientID', how='left')
patient_kfold_df[['PatientID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds_by_patient_id.csv', index=False)
train_gt_df[['StudyInstanceUID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds.csv', index=False)
train_gt_df[['fold'] + target_col].groupby('fold').sum().transpose()
code
50237900/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score
code
50237900/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score

current_kfold = []
for _ in tqdm(range(len(patient_kfold_df))):
    fold_scores = [(f, kfold_quality(current_kfold + [f])) for f in range(NUM_FOLDS)]
    best_fold = sorted(fold_scores, key=lambda x: x[1])[0][0]
    current_kfold.append(best_fold)
patient_kfold_df['fold'] = current_kfold
code
50237900/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
print(len(train_gt_df.PatientID.unique()), 'patients')
code
50237900/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score

patient_kfold_df = patient_kfold_df.reset_index()
train_gt_df = train_gt_df.merge(patient_kfold_df[['PatientID', 'fold']], on='PatientID', how='left')
patient_kfold_df[['PatientID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds_by_patient_id.csv', index=False)
train_gt_df[['StudyInstanceUID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds.csv', index=False)
images_per_fold = train_gt_df[['fold', 'StudyInstanceUID']].groupby('fold').count().rename(columns={'StudyInstanceUID': 'Num. images'})
images_per_fold
code
50237900/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
NUM_FOLDS = 5
import pandas as pd
from tqdm import tqdm
data_dir = '../input/ranzcr-clip-catheter-line-classification/'
target_col = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present']

def annotation_col_name(x):
    x = x[1]
    return 'annotation-' + ''.join([c for c in x if c.upper() == c and c != ' '])

annotations_df = pd.read_csv(data_dir + 'train_annotations.csv', usecols=['StudyInstanceUID', 'label'])
annotations_df['ones'] = 1
annotations_df = pd.pivot_table(annotations_df, values=['ones'], index=['StudyInstanceUID'], columns=['label'], aggfunc=np.sum).fillna(0)
annotations_df.columns = [annotation_col_name(col_name) for col_name in annotations_df.columns]
train_gt_df = pd.read_csv(data_dir + '/train.csv')
train_gt_df = train_gt_df.merge(annotations_df, on='StudyInstanceUID', how='left').fillna(0)
target_col += [x for x in annotations_df.columns]
num_target = len(target_col)
kfold_df = train_gt_df.copy().drop(['StudyInstanceUID'], axis=1)
patient_kfold_df = kfold_df.groupby('PatientID').sum()
priority_df = patient_kfold_df.sum().to_frame().reset_index().rename(columns={'index': 'label', 0: 'score'})
priority_df['score'] = [s * 4 if 'annotation' in l else s for l, s in zip(priority_df['label'], priority_df['score'])]
priority_df.sort_values(by='score', inplace=True)
label_priority = list(priority_df['label'])
label_importance = [(0.1 + patient_kfold_df.sum().max() - x) ** 2 for x in priority_df['score']]
label_importance[0] = 3 * label_importance[0]
patient_kfold_df = patient_kfold_df.sort_values(by=label_priority, ascending=False)

def kfold_quality(folds):
    patient_kfold_df['fold'] = folds + [-1] * (len(patient_kfold_df) - len(folds))
    scores = patient_kfold_df[:len(folds)][['fold'] + target_col].groupby('fold').sum()
    if scores.shape[0] < NUM_FOLDS:
        scores.reset_index(inplace=True)
        for f in range(NUM_FOLDS):
            if f not in scores.index:
                scores.loc[len(scores)] = 0
                scores.loc[len(scores) - 1, 'fold'] = f
        scores.set_index('fold', drop=True, inplace=True)
    score = scores.sum(axis=1).std()
    for label, importance in zip(label_priority, label_importance):
        scores[label] *= importance
        score += scores[label].max() - scores[label].min()
    return score

patient_kfold_df = patient_kfold_df.reset_index()
train_gt_df = train_gt_df.merge(patient_kfold_df[['PatientID', 'fold']], on='PatientID', how='left')
patient_kfold_df[['PatientID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds_by_patient_id.csv', index=False)
train_gt_df[['StudyInstanceUID', 'fold']].to_csv(f'stratified_{NUM_FOLDS}_folds.csv', index=False)
code
17109703/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os
from random import shuffle
from tqdm import tqdm
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
import os
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
for image in tqdm(os.listdir(train_cat)):
    path = os.path.join(train_cat, image)
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img = cv2.resize(img, (image_size, image_size)).flatten()
    except:
        pass
    np_img = np.asarray(img)
for image2 in tqdm(os.listdir(train_dog)):
    path = os.path.join(train_dog, image2)
    img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img2 = cv2.resize(img2, (image_size, image_size)).flatten()
    except:
        pass
    np_img2 = np.asarray(img2)
plt.axis('off')
plt.axis('off')
def train_data():
    train_data_cat = []
    train_data_dog = []
    for image1 in tqdm(os.listdir(train_cat)):
        path = os.path.join(train_cat, image1)
        img1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img1 = cv2.resize(img1, (image_size, image_size))
        except:
            pass
        train_data_cat.append(img1)
    for image2 in tqdm(os.listdir(train_dog)):
        path = os.path.join(train_dog, image2)
        img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img2 = cv2.resize(img2, (image_size, image_size))
        except:
            pass
        train_data_dog.append(img2)
    train_data = np.concatenate((np.asarray(train_data_cat).reshape(4001, -1), np.asarray(train_data_dog).reshape(4001, -1)), axis=1)
    return train_data
def test_data():
    test_data_cat = []
    test_data_dog = []
    for image1 in tqdm(os.listdir(test_cat)):
        path = os.path.join(test_cat, image1)
        img1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img1 = cv2.resize(img1, (image_size, image_size))
        except:
            pass
        test_data_cat.append(img1)
    for image2 in tqdm(os.listdir(test_dog)):
        path = os.path.join(test_dog, image2)
        img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img2 = cv2.resize(img2, (image_size, image_size))
        except:
            pass
        test_data_dog.append(img2)
    test_data = np.concatenate((np.asarray(test_data_cat).reshape(1001, -1), np.asarray(test_data_dog).reshape(1001, -1)), axis=1)
    return test_data
train_data = train_data()
test_data = test_data()
print(train_data.shape)
print(train_data.reshape(4001, 128, 128))
print(test_data.shape)
code
17109703/cell_4
[ "image_output_1.png" ]
from PIL import Image
Image.open('../input/training_set/training_set/cats/cat.1.jpg')
Image.open('../input/training_set/training_set/dogs/dog.1.jpg')
code
17109703/cell_1
[ "text_plain_output_1.png" ]
import os
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os
from random import shuffle
from tqdm import tqdm
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input'))
code
17109703/cell_8
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os
from random import shuffle
from tqdm import tqdm
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
import os
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
for image in tqdm(os.listdir(train_cat)):
    path = os.path.join(train_cat, image)
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img = cv2.resize(img, (image_size, image_size)).flatten()
    except:
        pass
    np_img = np.asarray(img)
for image2 in tqdm(os.listdir(train_dog)):
    path = os.path.join(train_dog, image2)
    img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img2 = cv2.resize(img2, (image_size, image_size)).flatten()
    except:
        pass
    np_img2 = np.asarray(img2)
plt.axis('off')
plt.axis('off')
def train_data():
    train_data_cat = []
    train_data_dog = []
    for image1 in tqdm(os.listdir(train_cat)):
        path = os.path.join(train_cat, image1)
        img1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img1 = cv2.resize(img1, (image_size, image_size))
        except:
            pass
        train_data_cat.append(img1)
    for image2 in tqdm(os.listdir(train_dog)):
        path = os.path.join(train_dog, image2)
        img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img2 = cv2.resize(img2, (image_size, image_size))
        except:
            pass
        train_data_dog.append(img2)
    train_data = np.concatenate((np.asarray(train_data_cat).reshape(4001, -1), np.asarray(train_data_dog).reshape(4001, -1)), axis=1)
    return train_data
def test_data():
    test_data_cat = []
    test_data_dog = []
    for image1 in tqdm(os.listdir(test_cat)):
        path = os.path.join(test_cat, image1)
        img1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img1 = cv2.resize(img1, (image_size, image_size))
        except:
            pass
        test_data_cat.append(img1)
    for image2 in tqdm(os.listdir(test_dog)):
        path = os.path.join(test_dog, image2)
        img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        try:
            img2 = cv2.resize(img2, (image_size, image_size))
        except:
            pass
        test_data_dog.append(img2)
    test_data = np.concatenate((np.asarray(test_data_cat).reshape(1001, -1), np.asarray(test_data_dog).reshape(1001, -1)), axis=1)
    return test_data
train_data = train_data()
test_data = test_data()
code
17109703/cell_3
[ "image_output_1.png" ]
from PIL import Image
Image.open('../input/training_set/training_set/cats/cat.1.jpg')
code
17109703/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os
from random import shuffle
from tqdm import tqdm
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
import os
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
for image in tqdm(os.listdir(train_cat)):
    path = os.path.join(train_cat, image)
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img = cv2.resize(img, (image_size, image_size)).flatten()
    except:
        pass
    np_img = np.asarray(img)
for image2 in tqdm(os.listdir(train_dog)):
    path = os.path.join(train_dog, image2)
    img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    try:
        img2 = cv2.resize(img2, (image_size, image_size)).flatten()
    except:
        pass
    np_img2 = np.asarray(img2)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.imshow(np_img.reshape(image_size, image_size))
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(np_img2.reshape(image_size, image_size))
plt.axis('off')
plt.title('Cats and Dogs in GrayScale')
code
32068734/cell_23
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import csv
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.optimize as optim
train_data = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_data = train_data.replace(np.nan, '', regex=True)
test_data = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data = test_data.replace(np.nan, '', regex=True)
def filter_train_data(country, region):
    selector = train_data['Country_Region'] == country
    onlyonecountry = train_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    del onlyoneregion['Id']
    onlyoneregion['Timestep'] = onlyoneregion.index
    return onlyoneregion
def infection_start(country, region):
    try:
        infection_start_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        infection_start_df = infection_start_df.replace(np.nan, '', regex=True)
        selector = infection_start_df['Country_Region'] == country
        onlyonecountry = infection_start_df[selector]
        selector2 = onlyonecountry['Province_State'] == region
        onlyoneregion = onlyonecountry[selector2]
        return onlyoneregion['ConfirmedCases'].iloc[0]
    except:
        return 2
population_reader = csv.reader(open('../input/population/population.csv', 'r'))
population_dict = {}
next(population_reader)
for row in population_reader:
    k, v = row
    population_dict[k] = int(v)
pop_by_region_reader = csv.reader(open('../input/populationbycity/populationbycity.csv', 'r'))
populationbyregion_dict = {}
next(pop_by_region_reader)
for row in pop_by_region_reader:
    k, v = row
    populationbyregion_dict[k] = int(v)
def get_population(country, region):
    if region != '':
        if region in populationbyregion_dict:
            return populationbyregion_dict[region]
        else:
            return 1000000
    elif country != '':
        if country in population_dict:
            return population_dict[country]
        else:
            return 1000000
    else:
        return 1000000
def filter_test_data(country, region, date):
    selector = test_data['Country_Region'] == country
    onlyonecountry = test_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    onlyoneregion['Timestep'] = onlyoneregion.index + 71
    if date != '':
        dateselect = test_data['Date'] == date
        onlyoneregion = onlyoneregion[dateselect]
    return onlyoneregion
def my_logistic(t, a, b, c):
    return c / (1 + a * np.exp(-b * t))
def calculate_infection(country_entry, region_entry, date_entry):
    train_set = filter_train_data(country_entry, region_entry)
    local_x = np.array(train_set['Timestep']) + 1
    local_y = np.array(train_set['ConfirmedCases'])
    p0 = np.random.exponential(size=3)
    bounds = (0, [100000.0, 3.0, 1000000000.0])
    try:
        (a, b, c), cov = optim.curve_fit(my_logistic, local_x, local_y, p0=p0, bounds=bounds)
    except:
        initial_infected = infection_start(country_entry, region_entry)
        c = get_population(country_entry, region_entry)
        b = 2
        a = c - 1
    test_set = filter_test_data(country_entry, region_entry, date_entry)
    test_set['Infected'] = round(my_logistic(test_set['Timestep'], a, b, c))
    return test_set.iloc[0]['Infected']
uruguay_forecast = calculate_infection('Uruguay', '', '2020-05-11')
print(uruguay_forecast)
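# Editorial note, not from the original notebook: in my_logistic, c is the saturation
# level the cumulative count approaches, b the growth rate, and the inflection point
# (fastest growth, where the curve reaches c / 2) sits at t = ln(a) / b.
# Illustrative, made-up parameters a=99, b=0.2:
example_inflection_day = np.log(99) / 0.2
example_inflection_day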
code
32068734/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068734/cell_18
[ "text_html_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_data = train_data.replace(np.nan, '', regex=True)
test_data = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data = test_data.replace(np.nan, '', regex=True)
def filter_train_data(country, region):
    selector = train_data['Country_Region'] == country
    onlyonecountry = train_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    del onlyoneregion['Id']
    onlyoneregion['Timestep'] = onlyoneregion.index
    return onlyoneregion
def infection_start(country, region):
    try:
        infection_start_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        infection_start_df = infection_start_df.replace(np.nan, '', regex=True)
        selector = infection_start_df['Country_Region'] == country
        onlyonecountry = infection_start_df[selector]
        selector2 = onlyonecountry['Province_State'] == region
        onlyoneregion = onlyonecountry[selector2]
        return onlyoneregion['ConfirmedCases'].iloc[0]
    except:
        return 2
def filter_test_data(country, region, date):
    selector = test_data['Country_Region'] == country
    onlyonecountry = test_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    onlyoneregion['Timestep'] = onlyoneregion.index + 71
    if date != '':
        dateselect = test_data['Date'] == date
        onlyoneregion = onlyoneregion[dateselect]
    return onlyoneregion
panama_test = filter_test_data('Panama', '', '2020-05-11')
panama_test.head()
code
32068734/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_data = train_data.replace(np.nan, '', regex=True)
test_data = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data = test_data.replace(np.nan, '', regex=True)
def filter_train_data(country, region):
    selector = train_data['Country_Region'] == country
    onlyonecountry = train_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    del onlyoneregion['Id']
    onlyoneregion['Timestep'] = onlyoneregion.index
    return onlyoneregion
def infection_start(country, region):
    try:
        infection_start_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        infection_start_df = infection_start_df.replace(np.nan, '', regex=True)
        selector = infection_start_df['Country_Region'] == country
        onlyonecountry = infection_start_df[selector]
        selector2 = onlyonecountry['Province_State'] == region
        onlyoneregion = onlyonecountry[selector2]
        return onlyoneregion['ConfirmedCases'].iloc[0]
    except:
        return 2
infection_start('Brazil', '')
code
32068734/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_data = train_data.replace(np.nan, '', regex=True)
test_data = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data = test_data.replace(np.nan, '', regex=True)
def filter_train_data(country, region):
    selector = train_data['Country_Region'] == country
    onlyonecountry = train_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    del onlyoneregion['Id']
    onlyoneregion['Timestep'] = onlyoneregion.index
    return onlyoneregion
panama = filter_train_data('Panama', '')
panama.head()
code
32068734/cell_27
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from scipy import stats
import csv
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.optimize as optim
train_data = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train_data = train_data.replace(np.nan, '', regex=True)
test_data = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data = test_data.replace(np.nan, '', regex=True)
def filter_train_data(country, region):
    selector = train_data['Country_Region'] == country
    onlyonecountry = train_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    del onlyoneregion['Id']
    onlyoneregion['Timestep'] = onlyoneregion.index
    return onlyoneregion
def infection_start(country, region):
    try:
        infection_start_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        infection_start_df = infection_start_df.replace(np.nan, '', regex=True)
        selector = infection_start_df['Country_Region'] == country
        onlyonecountry = infection_start_df[selector]
        selector2 = onlyonecountry['Province_State'] == region
        onlyoneregion = onlyonecountry[selector2]
        return onlyoneregion['ConfirmedCases'].iloc[0]
    except:
        return 2
population_reader = csv.reader(open('../input/population/population.csv', 'r'))
population_dict = {}
next(population_reader)
for row in population_reader:
    k, v = row
    population_dict[k] = int(v)
pop_by_region_reader = csv.reader(open('../input/populationbycity/populationbycity.csv', 'r'))
populationbyregion_dict = {}
next(pop_by_region_reader)
for row in pop_by_region_reader:
    k, v = row
    populationbyregion_dict[k] = int(v)
def get_population(country, region):
    if region != '':
        if region in populationbyregion_dict:
            return populationbyregion_dict[region]
        else:
            return 1000000
    elif country != '':
        if country in population_dict:
            return population_dict[country]
        else:
            return 1000000
    else:
        return 1000000
def filter_test_data(country, region, date):
    selector = test_data['Country_Region'] == country
    onlyonecountry = test_data[selector]
    selector2 = onlyonecountry['Province_State'] == region
    onlyoneregion = onlyonecountry[selector2]
    onlyoneregion = onlyoneregion.reset_index(drop=False)
    del onlyoneregion['index']
    onlyoneregion['Timestep'] = onlyoneregion.index + 71
    if date != '':
        dateselect = test_data['Date'] == date
        onlyoneregion = onlyoneregion[dateselect]
    return onlyoneregion
def my_logistic(t, a, b, c):
    return c / (1 + a * np.exp(-b * t))
def calculate_infection(country_entry, region_entry, date_entry):
    train_set = filter_train_data(country_entry, region_entry)
    local_x = np.array(train_set['Timestep']) + 1
    local_y = np.array(train_set['ConfirmedCases'])
    p0 = np.random.exponential(size=3)
    bounds = (0, [100000.0, 3.0, 1000000000.0])
    try:
        (a, b, c), cov = optim.curve_fit(my_logistic, local_x, local_y, p0=p0, bounds=bounds)
    except:
        initial_infected = infection_start(country_entry, region_entry)
        c = get_population(country_entry, region_entry)
        b = 2
        a = c - 1
    test_set = filter_test_data(country_entry, region_entry, date_entry)
    test_set['Infected'] = round(my_logistic(test_set['Timestep'], a, b, c))
    return test_set.iloc[0]['Infected']
def calculate_fatalities(country_entry, region_entry, date):
    df = filter_train_data(country_entry, region_entry)
    X = df['Timestep'].values
    Y = df['Fatalities'].values
    slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
    forecast_day = filter_test_data(country_entry, region_entry, date).iloc[0]['Timestep']
    return round(slope * forecast_day + intercept)
submission_file = pd.read_csv('../input/covid19-global-forecasting-week-4/submission.csv')
test_data_to_forecast = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_data_to_forecast = test_data_to_forecast.replace(np.nan, '', regex=True)
merged_inner = pd.merge(submission_file, test_data_to_forecast, on='ForecastId')
beginning = 0
end = 13458
with open('submission.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['ForecastId', 'ConfirmedCases', 'Fatalities'])
    for j in range(beginning, end + 1):
        try:
            forecast_Id = merged_inner.iloc[j, 0]
            infected = calculate_infection(merged_inner.iloc[j, 4], merged_inner.iloc[j, 3], merged_inner.iloc[j, 5])
            casualties = calculate_fatalities(merged_inner.iloc[j, 4], merged_inner.iloc[j, 3], merged_inner.iloc[j, 5])
            lst = [int(forecast_Id), int(infected), int(casualties)]
            print(*lst, sep=', ')
            writer.writerow(lst)
        except:
            print('error:' + str(forecast_Id) + ' ')
            lst = [int(forecast_Id), 0, 0]
            print(*lst, sep=', ')
            writer.writerow(lst)
            continue
print('End')
code
32068734/cell_12
[ "text_plain_output_1.png" ]
import csv
population_reader = csv.reader(open('../input/population/population.csv', 'r'))
population_dict = {}
next(population_reader)
for row in population_reader:
    k, v = row
    population_dict[k] = int(v)
pop_by_region_reader = csv.reader(open('../input/populationbycity/populationbycity.csv', 'r'))
populationbyregion_dict = {}
next(pop_by_region_reader)
for row in pop_by_region_reader:
    k, v = row
    populationbyregion_dict[k] = int(v)
def get_population(country, region):
    if region != '':
        if region in populationbyregion_dict:
            return populationbyregion_dict[region]
        else:
            return 1000000
    elif country != '':
        if country in population_dict:
            return population_dict[country]
        else:
            return 1000000
    else:
        return 1000000
print(get_population('Denmark', ''))
code
106192404/cell_1
[ "text_plain_output_1.png" ]
!pip install open_clip_torch
code
106192404/cell_7
[ "text_plain_output_1.png" ]
import torch
model = MyModel()
model.eval()
x = torch.rand(1, 3, 336, 336, device='cuda')
model(x).shape
saved_model = torch.jit.script(model)
saved_model.save('saved_model.pt')
saved_model(x).shape
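# Editorial sketch, not part of the original cell: a TorchScript module saved via
# saved_model.save(...) can be reloaded later without the MyModel class definition.
reloaded_model = torch.jit.load('saved_model.pt')
reloaded_model(x).shape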
code
106192404/cell_5
[ "text_plain_output_1.png" ]
import torch
model = MyModel()
model.eval()
x = torch.rand(1, 3, 336, 336, device='cuda')
model(x).shape
code
18120849/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
c[3:5] = (300, 400)
c
code
18120849/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
print(f'Numpy Array b {b}')
print(f'Type of Numpy Array b {type(b)}')
print(f'Elements Type of Numpy Array b {b.dtype}')
print(f'Size of Numpy Array b {b.size}')
print(f'Dimensions of Numpy Array b {b.ndim}')
print(f'Shape of Numpy Array b {b.shape}')
code
18120849/cell_25
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
code
18120849/cell_34
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
y = np.array([1, 2])
y
u = np.array([1, 2])
u
v = np.array([3, 1])
v
code
18120849/cell_30
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
y = np.array([1, 2])
y
code
18120849/cell_33
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
y = np.array([1, 2])
y
u = np.array([1, 2])
u
code
18120849/cell_44
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
z = u @ v
z
u = np.array([1, 2, 3, -1])
u
z = u + 1
z
code
18120849/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
d = c[1:4]
d
d.size
code
18120849/cell_40
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
z = u @ v
z
code
18120849/cell_39
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
code
18120849/cell_26
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
code
18120849/cell_48
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
code
18120849/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
for index, element in enumerate(a):
    print(f'index {index} element {element}')
code
18120849/cell_19
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
d = c[1:4]
d
code
18120849/cell_50
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
np.pi
code
18120849/cell_52
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
np.pi
x = np.array([0, np.pi / 2, np.pi])
x
y = np.sin(x)
y
code
18120849/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
print(f'Numpy Array a \n{a}')
print(f'Type of Numpy Array a {type(a)}')
print(f'Elements Type of Numpy Array a {a.dtype}')
print(f'Size of Numpy Array a {a.size}')
print(f'Dimensions of Numpy Array a {a.ndim}')
print(f'Shape of Numpy Array a {a.shape}')
code
18120849/cell_49
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
max_b = b.max()
max_b
code
18120849/cell_51
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
np.pi
x = np.array([0, np.pi / 2, np.pi])
x
code
18120849/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
code
18120849/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
code
18120849/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
c[0] = 100
c
code
18120849/cell_38
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
y = np.array([1, 2])
y
u = np.array([1, 2])
u
u.T
code
18120849/cell_47
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
mean_a = a.mean()
mean_a
code
18120849/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
c[4] = 0
c
code
18120849/cell_43
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
code
18120849/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
code
18120849/cell_46
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
code
18120849/cell_24
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
code
18120849/cell_14
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
for index, element in enumerate(b):
    print(f'index {index} element {element}')
code
18120849/cell_53
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
u.T
z = np.dot(u, v)
z
u = np.array([1, 2, 3, -1])
u
a = np.array([1, -1, 1, -1])
a
b = np.array([1, -2, 3, 4, 5])
b
np.pi
x = np.array([0, np.pi / 2, np.pi])
x
y = np.sin(x)
y
np.linspace(-2, 2, num=5)
code
18120849/cell_10
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
a[0]
code
18120849/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
type(z)
code
18120849/cell_36
[ "text_plain_output_1.png" ]
import numpy as np
a = np.array([0, 1, 2, 3, 4])
b = np.array([3.1, 11.02, 6.2, 231.2, 5.2])
c = np.array([20, 1, 2, 3, 4])
c
u = np.array([1, 0])
u
v = np.array([0, 1])
v
z = u + v
z
z = u - v
z
y = np.array([1, 2])
y
z = 2 * y
z
u = np.array([1, 2])
u
v = np.array([3, 1])
v
z = u * v
z
code
129023470/cell_4
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
code
129023470/cell_30
[ "text_html_output_1.png" ]
from sklearn.utils import resample
import pandas as pd
df = pd.read_csv('/kaggle/input/credit-card-fraud-detection/creditcard.csv')
df_train = pd.concat([X_train, y_train], axis='columns')
df_not_fraud = df_train[df_train['Class'] == 0]
df_fraud = df_train[df_train['Class'] == 1]
df_not_fraud_downsample = resample(df_not_fraud, replace=False, n_samples=len(df_fraud), random_state=1234)
df_downsampled = pd.concat([df_not_fraud_downsample, df_fraud])
df_downsampled['Class'].value_counts()
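# Editorial sketch, not part of the original cell: the same sklearn resample API can
# instead upsample the minority (fraud) class rather than discarding majority rows.
df_fraud_upsample = resample(df_fraud, replace=True, n_samples=len(df_not_fraud), random_state=1234)
df_upsampled = pd.concat([df_not_fraud, df_fraud_upsample])
df_upsampled['Class'].value_counts()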
code
129023470/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/credit-card-fraud-detection/creditcard.csv')
df.head()
code
129023470/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/credit-card-fraud-detection/creditcard.csv')
df.describe()
code
129023470/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/credit-card-fraud-detection/creditcard.csv')
plt.figure(figsize=(12, 8))
sns.countplot(x=df['Class'], data=df)
plt.show()
code
129023470/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/credit-card-fraud-detection/creditcard.csv')
df['Class'].value_counts()
code
34130031/cell_21
[ "text_html_output_1.png" ]
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json', 'r', errors='ignore') as f:
    test_information = json.load(f)
train_annotations.keys()
test_information.keys()
train_ann = pd.DataFrame(train_annotations['annotations'])
train_cat = pd.DataFrame(train_annotations['categories'])
train_imgs = pd.DataFrame(train_annotations['images'])
test_imgs = pd.DataFrame(test_information['images'])
test_cat = pd.DataFrame(test_information['categories'])
test_cat
code
34130031/cell_13
[ "text_plain_output_1.png" ]
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
train_annotations.keys()
train_ann = pd.DataFrame(train_annotations['annotations'])
train_cat = pd.DataFrame(train_annotations['categories'])
train_cat
code
34130031/cell_9
[ "text_html_output_1.png" ]
import json
import json
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json', 'r', errors='ignore') as f:
    test_information = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_megadetector_results.json', 'r', errors='ignore') as f:
    megadetector_results = json.load(f)
megadetector_results.keys()
code
34130031/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
samp_sub
code
34130031/cell_11
[ "text_html_output_1.png" ]
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
train_annotations.keys()
train_ann = pd.DataFrame(train_annotations['annotations'])
train_ann
code
34130031/cell_19
[ "text_html_output_1.png" ]
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json', 'r', errors='ignore') as f:
    test_information = json.load(f)
train_annotations.keys()
test_information.keys()
train_ann = pd.DataFrame(train_annotations['annotations'])
train_cat = pd.DataFrame(train_annotations['categories'])
train_imgs = pd.DataFrame(train_annotations['images'])
test_imgs = pd.DataFrame(test_information['images'])
test_imgs
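# Editorial note, not part of the original cell: pd.json_normalize (pandas >= 1.0) is an
# alternative to pd.DataFrame that also flattens any nested fields in these records.
test_imgs_flat = pd.json_normalize(test_information['images'])
test_imgs_flat.head()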
code