Columns:
  path              string     (length 13 to 17)
  screenshot_names  sequence   (length 1 to 873)
  code              string     (length 0 to 40.4k)
  cell_type         string     (1 class)
34130031/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34130031/cell_7
[ "text_plain_output_1.png" ]
import json

with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
train_annotations.keys()
code
34130031/cell_8
[ "text_html_output_1.png" ]
import json

with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json', 'r', errors='ignore') as f:
    test_information = json.load(f)
test_information.keys()
code
34130031/cell_15
[ "text_plain_output_1.png" ]
import json
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
samp_sub = pd.read_csv('/kaggle/input/iwildcam-2020-fgvc7/sample_submission.csv')
train_annotations.keys()
train_ann = pd.DataFrame(train_annotations['annotations'])
train_cat = pd.DataFrame(train_annotations['categories'])
train_imgs = pd.DataFrame(train_annotations['images'])
train_imgs
code
34130031/cell_16
[ "text_plain_output_1.png" ]
import json

with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
train_annotations.keys()
train_annotations['info']
code
34130031/cell_17
[ "text_html_output_1.png" ]
import json

with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json', 'r', errors='ignore') as f:
    train_annotations = json.load(f)
with open('/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json', 'r', errors='ignore') as f:
    test_information = json.load(f)
test_information.keys()
test_information['info']
code
316827/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
data.head()
code
316827/cell_30
[ "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('comments')
print(compare_means('comments'))
code
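The `comparison.append(...)` calls in the 316827 cells use `DataFrame.append`, which was deprecated in pandas 1.4 and removed in 2.0, so these cells no longer run on current pandas. A minimal sketch of the same pairwise comparison that collects rows in a list instead (assuming the `data` frame, `mannwhitneyu`, and `multipletests` exactly as in the cell above):

import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests

def compare_means_v2(data, field):
    """Pairwise Mann–Whitney U tests, pandas-2.x safe (no DataFrame.append)."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    rows = []
    for i in range(1, 4):
        for j in range(i + 1, 4):  # each unordered pair once
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            rows.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p})
    comparison = pd.DataFrame(rows)
    rejected, p_corrected, _, _ = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison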
316827/cell_33
[ "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('msg_len')
print(compare_means('msg_len'))
code
316827/cell_20
[ "text_plain_output_1.png" ]
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

conf_interval('likes')
code
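Only the docstring of `conf_interval` survives in this dump; its body was stripped, so the calls above print nothing. Since every cell that calls it imports `zconfint` from `statsmodels.stats.weightstats`, the original most likely printed a z-based confidence interval per group. A hypothetical reconstruction under that assumption, using the `park`/`town`/`free` frames defined above:

from statsmodels.stats.weightstats import zconfint

def conf_interval(field):
    """Hypothetical reconstruction: print a z-confidence interval of the mean per group."""
    for name, group in (('park', park), ('town', town), ('free', free)):
        low, high = zconfint(group[field])  # 95% interval by default (alpha=0.05)
        print('%s %s: [%.2f, %.2f]' % (name, field, low, high))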
316827/cell_40
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

shared = data[data.shares > data.shares.quantile(0.98)][data.shares > data.likes * 10][['msg', 'shares']]
top = 10
sorted_data = shared.sort_values(by='shares', ascending=False)[:top]
likes = data[data.likes > data.likes.quantile(0.98)][data.likes > data.shares * 100][['msg', 'likes']]
print('top %d out of %d' % (top, likes.shape[0]))
sorted_data = likes.sort_values(by='likes', ascending=False)[:top]
for i in sorted_data.index.values:
    print('likes:', sorted_data.likes[i], '\n', 'message:', sorted_data.msg[i][:300], '\n')
code
316827/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

sns.pairplot(data, hue='gid')
code
316827/cell_43
[ "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

shared = data[data.shares > data.shares.quantile(0.98)][data.shares > data.likes * 10][['msg', 'shares']]
top = 10
sorted_data = shared.sort_values(by='shares', ascending=False)[:top]
likes = data[data.likes > data.likes.quantile(0.98)][data.likes > data.shares * 100][['msg', 'likes']]
sorted_data = likes.sort_values(by='likes', ascending=False)[:top]
discussed = data[data.comments > data.comments.quantile(0.98)][['msg', 'comments']]
print('top %d out of %d\n' % (top, discussed.shape[0]))
sorted_data = discussed.sort_values(by='comments', ascending=False)[:top]
for i in sorted_data.index.values:
    print('comments:', sorted_data.comments[i], '\n', 'message:', sorted_data.msg[i][:300], '\n')
code
316827/cell_24
[ "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('likes')
print(compare_means('likes'))
code
316827/cell_27
[ "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('shares')
print(compare_means('shares'))
code
316827/cell_37
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)

park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field."""

def compare_means(field):
    """Pairwise Mann–Whitney U tests to compare value levels between groups."""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    rejected, p_corrected, a1, a2 = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

shared = data[data.shares > data.shares.quantile(0.98)][data.shares > data.likes * 10][['msg', 'shares']]
top = 10
print('top %d out of %d' % (top, shared.shape[0]))
sorted_data = shared.sort_values(by='shares', ascending=False)[:top]
for i in sorted_data.index.values:
    print('shares:', sorted_data.shares[i], '\n', 'message:', sorted_data.msg[i][:200], '\n')
code
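The stderr screenshot attached to this cell is consistent with the chained selection `data[cond1][cond2]`: the second boolean Series is computed against the full frame but applied to the already-filtered one, so pandas warns that the boolean key will be reindexed. A sketch that combines both conditions into a single mask up front, using the same `data` frame as above, avoids the warning:

# one combined mask instead of two chained boolean selections
mask = (data.shares > data.shares.quantile(0.98)) & (data.shares > data.likes * 10)
shared = data.loc[mask, ['msg', 'shares']]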
16161648/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()

for col in ['Pclass', 'SibSp', 'Parch', 'TicketSplitLen', 'Sex', 'CabinPrefix']:
    unique_vals = np.array(train[col].unique())
    unique_vals.sort()
    for unique_value in unique_vals:
        for df in [train, test]:
            df.loc[df[col] == unique_value, f'{col} {unique_value}'] = 1
            df.loc[df[col] != unique_value, f'{col} {unique_value}'] = 0

feature_cols = ['Age', 'Fare', 'Pclass 1', 'Pclass 2', 'Pclass 3', 'SibSp', 'Parch',
                'TicketSplitLen 1', 'TicketSplitLen 2', 'TicketSplitLen 3',
                'Sex female', 'Sex male',
                'CabinPrefix A', 'CabinPrefix B', 'CabinPrefix C', 'CabinPrefix D',
                'CabinPrefix E', 'CabinPrefix F', 'CabinPrefix G', 'CabinPrefix None']

X = np.array(train[feature_cols])
y = np.array(train.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
X_submission = np.array(test[feature_cols])

lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]],
                          columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])

nn_model = Sequential()
nn_model.add(Dense(20, activation='relu', input_shape=(20,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(16, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

results = nn_model.fit(X_train, y_train, epochs=200, batch_size=48, validation_data=(X_test, y_test))

f, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].plot(results.history['loss'])
axes[0].plot(results.history['val_loss'])
axes[0].set_title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
axes[0].grid(color='grey')
axes[1].plot(results.history['acc'])
axes[1].plot(results.history['val_acc'])
axes[1].set_title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
axes[1].grid(color='grey')
plt.show()

predictions = nn_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions.round())
precision = precision_score(y_test, predictions.round())
recall = recall_score(y_test, predictions.round())
f1 = f1_score(y_test, predictions.round())
comparison = comparison.append({'Model': 'NN', 'Accuracy': accuracy, 'Precision': precision,
                                'Recall': recall, 'F1': f1}, ignore_index=True)
print(f'Accuracy with NN: {accuracy}')
print(f'Precision with NN: {precision}')
print(f'Recall with NN: {recall}')
print(f'F1 with NN: {f1}')

n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_random = RandomizedSearchCV(estimator=rf_model, param_distributions=random_grid,
                               n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)
best_random = rf_random.best_estimator_
predictions = best_random.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
comparison = comparison.append({'Model': 'RF', 'Accuracy': accuracy, 'Precision': precision,
                                'Recall': recall, 'F1': f1}, ignore_index=True)
print(f'Accuracy with RF: {accuracy}')
print(f'Precision with RF: {precision}')
print(f'Recall with RF: {recall}')
print(f'F1 with RF: {f1}')
code
16161648/cell_2
[ "text_plain_output_1.png" ]
import os
import string

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
16161648/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()

print(train.columns)
feature_cols = ['Age', 'Fare', 'Pclass 1', 'Pclass 2', 'Pclass 3', 'SibSp', 'Parch',
                'TicketSplitLen 1', 'TicketSplitLen 2', 'TicketSplitLen 3',
                'Sex female', 'Sex male',
                'CabinPrefix A', 'CabinPrefix B', 'CabinPrefix C', 'CabinPrefix D',
                'CabinPrefix E', 'CabinPrefix F', 'CabinPrefix G', 'CabinPrefix None']
code
16161648/cell_19
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()

for col in ['Pclass', 'SibSp', 'Parch', 'TicketSplitLen', 'Sex', 'CabinPrefix']:
    unique_vals = np.array(train[col].unique())
    unique_vals.sort()
    for unique_value in unique_vals:
        for df in [train, test]:
            df.loc[df[col] == unique_value, f'{col} {unique_value}'] = 1
            df.loc[df[col] != unique_value, f'{col} {unique_value}'] = 0

feature_cols = ['Age', 'Fare', 'Pclass 1', 'Pclass 2', 'Pclass 3', 'SibSp', 'Parch',
                'TicketSplitLen 1', 'TicketSplitLen 2', 'TicketSplitLen 3',
                'Sex female', 'Sex male',
                'CabinPrefix A', 'CabinPrefix B', 'CabinPrefix C', 'CabinPrefix D',
                'CabinPrefix E', 'CabinPrefix F', 'CabinPrefix G', 'CabinPrefix None']

X = np.array(train[feature_cols])
y = np.array(train.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
X_submission = np.array(test[feature_cols])

n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_random = RandomizedSearchCV(estimator=rf_model, param_distributions=random_grid,
                               n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)
best_random = rf_random.best_estimator_
code
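The manual one-hot loop in these cells can be expressed with `pd.get_dummies`, which produces the same 'Pclass 1' / 'Sex female' style column names when given `prefix_sep=' '`. Note also that `max_features='auto'` was removed from `RandomForestClassifier` in scikit-learn 1.3, so on current versions the search grid needs `'sqrt'`, `'log2'`, or `None` instead. A sketch of the encoding step, assuming the `train` frame as prepared above:

import pandas as pd

cat_cols = ['Pclass', 'SibSp', 'Parch', 'TicketSplitLen', 'Sex', 'CabinPrefix']
# one indicator column per (column, value) pair, named e.g. 'Pclass 1' to match feature_cols
dummies = pd.get_dummies(train[cat_cols].astype(str), prefix_sep=' ').astype(int)
train = pd.concat([train, dummies], axis=1)

The same call on `test`, followed by reindexing its dummy columns to `train`'s, keeps the two frames aligned even when a category value is missing from one of them.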
16161648/cell_7
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()
code
16161648/cell_16
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

nn_model = Sequential()
nn_model.add(Dense(20, activation='relu', input_shape=(20,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(16, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
code
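The training cells below plot `results.history['acc']` and `['val_acc']`, which are the key names recorded by Keras before 2.3; on TensorFlow 2.x with `metrics=['accuracy']` the recorded keys are `'accuracy'` and `'val_accuracy'`, so the plots raise a KeyError there. A version-tolerant sketch, assuming the `results` object returned by `nn_model.fit(...)` and matplotlib imported as `plt`:

import matplotlib.pyplot as plt

# pick whichever key this Keras version actually recorded
acc_key = 'accuracy' if 'accuracy' in results.history else 'acc'
plt.plot(results.history[acc_key], label='Train')
plt.plot(results.history['val_' + acc_key], label='Test')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()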
16161648/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
16161648/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()

for col in ['Pclass', 'SibSp', 'Parch', 'TicketSplitLen', 'Sex', 'CabinPrefix']:
    unique_vals = np.array(train[col].unique())
    unique_vals.sort()
    for unique_value in unique_vals:
        for df in [train, test]:
            df.loc[df[col] == unique_value, f'{col} {unique_value}'] = 1
            df.loc[df[col] != unique_value, f'{col} {unique_value}'] = 0

feature_cols = ['Age', 'Fare', 'Pclass 1', 'Pclass 2', 'Pclass 3', 'SibSp', 'Parch',
                'TicketSplitLen 1', 'TicketSplitLen 2', 'TicketSplitLen 3',
                'Sex female', 'Sex male',
                'CabinPrefix A', 'CabinPrefix B', 'CabinPrefix C', 'CabinPrefix D',
                'CabinPrefix E', 'CabinPrefix F', 'CabinPrefix G', 'CabinPrefix None']

X = np.array(train[feature_cols])
y = np.array(train.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
X_submission = np.array(test[feature_cols])

lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]],
                          columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])

nn_model = Sequential()
nn_model.add(Dense(20, activation='relu', input_shape=(20,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(16, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

results = nn_model.fit(X_train, y_train, epochs=200, batch_size=48, validation_data=(X_test, y_test))

f, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].plot(results.history['loss'])
axes[0].plot(results.history['val_loss'])
axes[0].set_title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
axes[0].grid(color='grey')
axes[1].plot(results.history['acc'])
axes[1].plot(results.history['val_acc'])
axes[1].set_title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
axes[1].grid(color='grey')
plt.show()

predictions = nn_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions.round())
precision = precision_score(y_test, predictions.round())
recall = recall_score(y_test, predictions.round())
f1 = f1_score(y_test, predictions.round())
comparison = comparison.append({'Model': 'NN', 'Accuracy': accuracy, 'Precision': precision,
                                'Recall': recall, 'F1': f1}, ignore_index=True)
print(f'Accuracy with NN: {accuracy}')
print(f'Precision with NN: {precision}')
print(f'Recall with NN: {recall}')
print(f'F1 with NN: {f1}')
code
16161648/cell_14
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def prepareFeatuers(df):
    df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
    df.loc[df.Cabin.isnull(), 'CabinPrefix'] = 'None'
    df['CabinKnown'] = [value for value in df.Cabin.isnull()]
    df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
    df['Sex_Ind'] = -1
    df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
    df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
    df['Age'] = df.Age.fillna(0)
    df['Fare'] = df.Fare.fillna(0)
    return df

train = prepareFeatuers(train)
test = prepareFeatuers(test)

cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
    q = train.groupby(col).Survived.sum()
    t = train.groupby(col).Survived.sum() + train.groupby(col).Survived.count()
    fig, ax = plt.subplots()
    pos = [i for i, name in enumerate(q.index)]
    vals = [name for i, name in enumerate(q.index)]
    ax.barh(pos, t, color='r', label='died')
    ax.barh(pos, q, label='survived')
    ax.set_yticks(pos)
    ax.set_yticklabels(vals)
    ax.set_ylabel(col)
    ax.legend()

for col in ['Pclass', 'SibSp', 'Parch', 'TicketSplitLen', 'Sex', 'CabinPrefix']:
    unique_vals = np.array(train[col].unique())
    unique_vals.sort()
    for unique_value in unique_vals:
        for df in [train, test]:
            df.loc[df[col] == unique_value, f'{col} {unique_value}'] = 1
            df.loc[df[col] != unique_value, f'{col} {unique_value}'] = 0

feature_cols = ['Age', 'Fare', 'Pclass 1', 'Pclass 2', 'Pclass 3', 'SibSp', 'Parch',
                'TicketSplitLen 1', 'TicketSplitLen 2', 'TicketSplitLen 3',
                'Sex female', 'Sex male',
                'CabinPrefix A', 'CabinPrefix B', 'CabinPrefix C', 'CabinPrefix D',
                'CabinPrefix E', 'CabinPrefix F', 'CabinPrefix G', 'CabinPrefix None']

X = np.array(train[feature_cols])
y = np.array(train.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
X_submission = np.array(test[feature_cols])

lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]],
                          columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])
print(f'Accuracy with LR: {accuracy}')
print(f'Precision with LR: {precision}')
print(f'Recall with LR: {recall}')
print(f'F1 with LR: {f1}')
code
122249481/cell_13
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])
code
122249481/cell_4
[ "image_output_1.png" ]
import pandas as pd

df_X = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df = df_X
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T
df_X_submission = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/test_set_rna.csv', index_col=0).T
print(df_X.shape, df_Y.shape, df_X_submission.shape)
display(df_X.head(2))
display(df_Y.head(2))
code
122249481/cell_20
[ "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:  # 'PTPRC' is the gene name for CD45
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])

for col in ['CD45RA', 'CD45RO']:
    n_bins = 50
    fig = plt.figure(figsize=(20, 4))
    fig.add_subplot(1, 2, 1)
    mask = df['PTPRC'] == 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('ZERO RNA condition ' + col)
    fig.add_subplot(1, 2, 2)
    mask = df['PTPRC'] != 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('NON-ZERO RNA condition ' + col)
    plt.show()
    # print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1,0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1,0], )

d2 = pd.DataFrame()
for c in ['CD45RA', 'CD45RO', 'MALAT1', 'NEAT1', 'CD45', 'SNRPD2', 'SNRPE', 'SRSF1', 'SRSF5', 'PTPRC']:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
d2.corr()
sns.clustermap(d2.corr().round(2), annot=True)

l = ['CD45RA', 'CD45RO', 'CD45', 'PTPRC', 'CD53', 'CD4', 'CD28', 'CD37', 'CD81', 'APC', 'SP1',
     'CD151', 'MUC1', 'SDS', 'CD69', 'PTPRC', 'CD9', 'LCK', 'CD44', 'UCHL1', 'CD5', 'CD55',
     'BCR', 'NHS', 'STAR', 'ZAP70']
d2 = pd.DataFrame()
for c in l:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
d2.corr()
sns.clustermap(d2.corr().round(2), annot=True)
plt.show()
code
122249481/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

t0start = time.time()

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122249481/cell_19
[ "text_html_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:  # 'PTPRC' is the gene name for CD45
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])

for col in ['CD45RA', 'CD45RO']:
    n_bins = 50
    fig = plt.figure(figsize=(20, 4))
    fig.add_subplot(1, 2, 1)
    mask = df['PTPRC'] == 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('ZERO RNA condition ' + col)
    fig.add_subplot(1, 2, 2)
    mask = df['PTPRC'] != 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('NON-ZERO RNA condition ' + col)
    plt.show()
    # print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1,0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1,0], )

d2 = pd.DataFrame()
for c in ['CD45RA', 'CD45RO', 'MALAT1', 'NEAT1', 'CD45', 'SNRPD2', 'SNRPE', 'SRSF1', 'SRSF5', 'PTPRC']:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
d2.corr()
sns.clustermap(d2.corr().round(2), annot=True)

l = ['CD45RA', 'CD45RO', 'CD45', 'PTPRC', 'CD53', 'CD4', 'CD28', 'CD37', 'CD81', 'APC', 'SP1',
     'CD151', 'MUC1', 'SDS', 'CD69', 'PTPRC', 'CD9', 'LCK', 'CD44', 'UCHL1', 'CD5', 'CD55',
     'BCR', 'NHS', 'STAR', 'ZAP70']
d2 = pd.DataFrame()
for c in l:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
    else:
        print(c, 'absent')
d2.corr()
code
122249481/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()
code
122249481/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

# df (RNA counts) is assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T

df_corr = df.corr()
N = 20
d = df_corr['CD53'].sort_values(ascending=False, key=abs).head(N).to_frame()
for t in df_corr['CD53'].sort_values(ascending=False, key=abs).index[:N]:
    m = (df[t] != 0) & (df['CD53'] != 0)
    c = np.corrcoef(df[t][m], df['CD53'][m])[0, 1]
    d.loc[t, 'Corr non zeros'] = c
d
code
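This cell only needs each gene's correlation with CD53, yet `df.corr()` computes the full gene-by-gene matrix, which is quadratic in the number of genes. A sketch of the same ranking via `DataFrame.corrwith`, assuming the same `df` RNA matrix:

# correlations of every gene with CD53 only, without the full correlation matrix
cd53_corr = df.corrwith(df['CD53']).sort_values(ascending=False, key=abs)
print(cd53_corr.head(20))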
122249481/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:  # 'PTPRC' is the gene name for CD45
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])

d2 = pd.DataFrame()
for c in ['CD45RA', 'CD45RO', 'MALAT1', 'NEAT1', 'CD45', 'SNRPD2', 'SNRPE', 'SRSF1', 'SRSF5', 'PTPRC']:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
    else:
        print(c, 'absent')
d2.corr()
code
122249481/cell_17
[ "image_output_5.png", "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:  # 'PTPRC' is the gene name for CD45
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])

for col in ['CD45RA', 'CD45RO']:
    n_bins = 50
    fig = plt.figure(figsize=(20, 4))
    fig.add_subplot(1, 2, 1)
    mask = df['PTPRC'] == 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('ZERO RNA condition ' + col)
    fig.add_subplot(1, 2, 2)
    mask = df['PTPRC'] != 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('NON-ZERO RNA condition ' + col)
    plt.show()
    # print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1,0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1,0], )

d2 = pd.DataFrame()
for c in ['CD45RA', 'CD45RO', 'MALAT1', 'NEAT1', 'CD45', 'SNRPD2', 'SNRPE', 'SRSF1', 'SRSF5', 'PTPRC']:
    if c in df.columns:
        d2[c] = df[c]
    elif c in df_Y.columns:
        d2[c] = df_Y[c]
d2.corr()
sns.clustermap(d2.corr().round(2), annot=True)
plt.show()
code
122249481/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()

if 'PTPRC' in df.columns:  # 'PTPRC' is the gene name for CD45
    y_loc = df['PTPRC']
    x_loc = df_Y['CD45RA']
    fig = plt.figure(figsize=(20, 10))
    fig.add_subplot(1, 2, 1)
    sns.scatterplot(x=x_loc, y=y_loc)
    fig.add_subplot(1, 2, 2)
    x_loc = df_Y['CD45RO']
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.show()
    print('Pearson correlations CD45RA, CD45RO:')
    print(np.corrcoef(df['PTPRC'], df_Y['CD45RA'])[1, 0], np.corrcoef(df['PTPRC'], df_Y['CD45RO'])[1, 0])
    print('Pearson correlations excluding zero rna values CD45RA, CD45RO:')
    mask = df['PTPRC'] != 0
    print(np.corrcoef(df['PTPRC'][mask], df_Y['CD45RA'][mask])[1, 0], np.corrcoef(df['PTPRC'][mask], df_Y['CD45RO'][mask])[1, 0])

for col in ['CD45RA', 'CD45RO']:
    n_bins = 50
    fig = plt.figure(figsize=(20, 4))
    fig.add_subplot(1, 2, 1)
    mask = df['PTPRC'] == 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('ZERO RNA condition ' + col)
    fig.add_subplot(1, 2, 2)
    mask = df['PTPRC'] != 0
    plt.hist(np.clip(df_Y[col][mask], 0, 2.5), bins=n_bins)
    plt.title('NON-ZERO RNA condition ' + col)
    plt.show()
code
122249481/cell_10
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import umap

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

reducer = umap.UMAP(random_state=42)
r = reducer.fit_transform(df)
dict_reds = {}
dict_reds['umap'] = r

n_x_subplots = 2
c = 0
str_data_inf = 'CITEseq2302'
l = ['CD45RA', 'CD45RO', 'PTPRC', 'CD53', 'MALAT1', 'NEAT1', 'CD3', 'CD4', 'CD69']
for gene in l[:40]:
    if gene in df.columns:
        v4color = df[gene]
    elif gene in df_Y.columns:
        v4color = df_Y[gene]
    else:
        continue
    v4color = np.clip(v4color, np.percentile(v4color, 5), np.percentile(v4color, 95))
    if c % n_x_subplots == 0:
        if c > 0:
            plt.show()
        fig = plt.figure(figsize=(20, 4))
        c = 0
        plt.suptitle(' UMAP ' + str_data_inf + ' n_samples=' + str(len(df)), fontsize=12)
    c += 1
    fig.add_subplot(1, n_x_subplots, c)
    sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color, palette='rainbow')
code
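A note on the dependency: `import umap` here refers to the umap-learn package, not the unrelated PyPI package named `umap`. A minimal sketch of the embedding step on a fresh environment, assuming the same `df` cells-by-genes matrix from cell_4:

# pip install umap-learn
import umap

reducer = umap.UMAP(random_state=42)   # n_components=2 by default
embedding = reducer.fit_transform(df)  # one 2-D point per cell (row of df)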
122249481/cell_12
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd
import seaborn as sns

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

fig = plt.figure(figsize=(20, 8))
col_x = 'CD53'
x_loc = df[col_x]
c = 0
for col in ['CD45RA', 'CD45RO', 'PTPRC']:
    if col in df_Y.columns:
        y_loc = df_Y[col]
    else:
        y_loc = df[col]
    c += 1
    fig.add_subplot(1, 3, c)
    sns.scatterplot(x=x_loc, y=y_loc)
    plt.xlabel(col_x)
    plt.ylabel(col)
    print(np.corrcoef(x_loc, y_loc)[0, 1], 'correlation ', col_x, col)
    m = (x_loc != 0) & (y_loc != 0)
    cc = np.corrcoef(x_loc[m], y_loc[m])[0, 1]
    print(cc, 'correlation excluding zeros ', col_x, col)
    if np.abs(cc) > 0.5:
        print('WOW it is big ! ')
plt.show()

display(df_Y[['CD45RA', 'CD45RO']].corr())

y_loc = df_Y['CD45RO']
x_loc = df_Y['CD45RA']
fig = plt.figure(figsize=(10, 10))
# fig.add_subplot(1, 2, 1)
sns.scatterplot(x=x_loc, y=y_loc)
# fig.add_subplot(1, 2, 2)
# x_loc = df_Y['CD45RO']
# sns.scatterplot(x=x_loc, y=y_loc)
plt.show()
code
122249481/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from scipy import stats
import pandas as pd

# df (RNA counts) and df_Y (ADT protein levels) are assumed loaded as in cell_4
df = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_rna.csv', index_col=0).T
df_Y = pd.read_csv('/kaggle/input/machine-learning-challenge-2-prediction/training_set_adt.csv', index_col=0).T

d1_corr = pd.DataFrame(index=df.columns)
res = stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
col1 = 'CD45RA'
for col in df.columns:
    v0 = df[col]
    v1 = df_Y[col1]
    res = stats.pearsonr(v0, v1)
    d1_corr.loc[col, 'Corr ' + col1] = res[0]
    d1_corr.loc[col, 'pvalue ' + col1] = res[1]
d1_corr.to_csv()  # no path given, so this just returns the CSV as a string
m = d1_corr[d1_corr.columns[1]] < 0.05 / len(d1_corr)
print(m.sum())
display(d1_corr[m].sort_values(d1_corr.columns[0], ascending=False, key=abs))
d1_corr.sort_values(d1_corr.columns[0], ascending=False, key=abs)
code
16125229/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
df.groupby(by='state')['user_id'].count().sort_values(ascending=False)
code
16125229/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
state.columns
code
16125229/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
df.groupby(by='state')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
corrmat = df.corr()
sns.set(font_scale=1)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True, annot=True)
code
16125229/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
df.groupby(by='state')['user_id'].count().sort_values(ascending=False)
plt.figure(figsize=(15, 10))
df.groupby(by='state')['user_id'].count().sort_values(ascending=False).plot.bar()
plt.xticks(rotation=50)
plt.xlabel('States')
plt.ylabel('Number of plays')
plt.show()
code
16125229/cell_6
[ "image_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
code
16125229/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
df.groupby(by='state')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
corrmat = df.corr()
sns.set(font_scale=1)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True, annot=True);
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(df['duration'], df['value'])
ax.set_title('Music Dataset')
ax.set_xlabel('Duration')
ax.set_ylabel('Value')
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(df['duration'], df['plays'])
ax.set_title('Music Dataset')
ax.set_xlabel('Duration')
ax.set_ylabel('Plays')
code
16125229/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
hits.head()
code
16125229/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.figure(figsize=(15, 10))
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False).plot.bar()
plt.xticks(rotation=50)
plt.xlabel('Genre')
plt.ylabel('Number of plays')
plt.show()
code
16125229/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
musics.columns
code
16125229/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
genre.columns
code
16125229/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.head()
code
16125229/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
code
16125229/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
code
16125229/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
code
16125229/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
musics.columns
musics.head()
code
16125229/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

hits = pd.read_csv('../input/hits.csv')
musics = pd.read_csv('../input/music_data.csv')
genre = pd.read_csv('../input/genre.csv')
state = pd.read_csv('../input/state.csv')
hits.columns
musics.columns
genre.columns
state.columns
df = hits.merge(genre, on='genre_id').merge(state, on='state_id').merge(musics, on='music_id')
df.columns
df.query('user_id==878')
df.groupby(by='genre')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
df.groupby(by='state')['user_id'].count().sort_values(ascending=False)
plt.xticks(rotation=50)
corrmat = df.corr()
sns.set(font_scale=1)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True, annot=True);
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(df['duration'], df['value'])
ax.set_title('Music Dataset')
ax.set_xlabel('Duration')
ax.set_ylabel('Value')
code
90118469/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
import numpy as np
import random

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
X_test_final = X_test.to_numpy(dtype='uint8')
# Shift each sample along the normal vector w of the LDA decision hyperplane.
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda.predict(X_attack)
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, Y_attack)
X_attack1 = X_attack.reshape(X_attack.shape[0], 28, 28)
for i in range(0, 10):
    s = random.randint(0, X_attack.shape[0] - 1)  # randint is inclusive on both ends
    print(s)
    plt.imshow(X_attack1[s])
    plt.show()
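# ---- Editor's sketch, not part of the original notebook ----
# Quick sanity check of the attack: the fraction of test digits whose predicted
# label changes after the shift toward the decision hyperplane. Uses only names
# defined above.
flip_rate = np.mean(Y_attack != y_pred)
print('fraction of predictions flipped by the attack:', flip_rate)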
code
90118469/cell_13
[ "text_plain_output_1.png" ]
X_test_final = X_test.to_numpy(dtype='uint8')
print(X_test_final)
code
90118469/cell_9
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np

# X_train, y_train, X_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
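# ---- Editor's sketch, not part of the original notebook ----
# The coefficients above define LDA's linear score; this checks (under the
# assumption that X_test is a DataFrame, as elsewhere in the notebook) that
# X @ w.T + w0 reproduces sklearn's decision_function.
scores = np.squeeze(X_test.to_numpy() @ w.T + w0)
print(np.allclose(scores, np.squeeze(lda.decision_function(X_test))))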
code
90118469/cell_11
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
import numpy as np

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
disp.figure_.suptitle('Confusion Matrix')
print(f'Confusion matrix:\n{disp.confusion_matrix}')
plt.show()
code
90118469/cell_19
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
import numpy as np

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
X_test_final = X_test.to_numpy(dtype='uint8')
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda.predict(X_attack)
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, Y_attack)
disp.figure_.suptitle('Confusion Matrix')
print(f'Confusion matrix:\n{disp.confusion_matrix}')
plt.show()
code
90118469/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
print(accuracy_score(y_test, y_pred))
print(y_pred.shape)
code
90118469/cell_18
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
import numpy as np

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
X_test_final = X_test.to_numpy(dtype='uint8')
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda.predict(X_attack)
print(f'Classification report for classifier {lda}:\n{metrics.classification_report(y_test, Y_attack)}\n')
code
90118469/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

# X_train, y_train, X_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
code
90118469/cell_15
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np

# X_train, y_train, X_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
X_test_final = X_test.to_numpy(dtype='uint8')
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
print(X_attack.shape)
code
90118469/cell_16
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np

# X_train, y_train, X_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
X_test_final = X_test.to_numpy(dtype='uint8')
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda.predict(X_attack)
code
90118469/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

mnist_train = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
mnist_train.head()
code
90118469/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
X_test_final = X_test.to_numpy(dtype='uint8')
X_attack = X_test_final - (X_test_final @ np.transpose(w) + w0) @ w / np.linalg.norm(w)
Y_attack = lda.predict(X_attack)
print(accuracy_score(y_test, Y_attack))
print(Y_attack.shape)
code
90118469/cell_10
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np

# X_train, y_train, X_test, y_test are defined in earlier cells of the notebook.
lda = LDA(n_components=1)
X_train_r2 = lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
w = lda.coef_
w.shape
w0 = lda.intercept_
np.transpose(w0).shape
print(f'Classification report for classifier {lda}:\n{metrics.classification_report(y_test, y_pred)}\n')
code
17121374/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
print(test_df.shape)
test_df.describe()
code
17121374/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.head(10)
code
17121374/cell_20
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
train_df.dtypes
(train_df.isnull().sum() / 1460 * 100).iloc[0:50]
(test_df.isnull().sum() / 1460 * 100).iloc[50:82]
test_df.drop(['Id', 'PoolQC', 'Fence', 'MiscFeature', 'FireplaceQu', 'Alley'], axis=1).head()
train_df.drop(['Id', 'PoolQC', 'Fence', 'MiscFeature', 'FireplaceQu', 'Alley'], axis=1).head()
num_attributes = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt',
                           'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',
                           '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath',
                           'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenQual', 'TotRmsAbvGrd', 'Fireplaces',
                           'GarageYrBlt', 'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',
                           '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold', 'SalePrice']]
corr = num_attributes.corr()
# Mask the upper triangle; the builtin bool is used because the np.bool alias was removed from NumPy.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-0.4, vmax=0.4, center=0, square=True,
            linewidths=0.5, cbar_kws={'shrink': 0.5})
code
17121374/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
code
17121374/cell_19
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
train_df.dtypes
(train_df.isnull().sum() / 1460 * 100).iloc[0:50]
(test_df.isnull().sum() / 1460 * 100).iloc[50:82]
test_df.drop(['Id', 'PoolQC', 'Fence', 'MiscFeature', 'FireplaceQu', 'Alley'], axis=1).head()
train_df.drop(['Id', 'PoolQC', 'Fence', 'MiscFeature', 'FireplaceQu', 'Alley'], axis=1).head()
code
17121374/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns

print(os.listdir('../input'))
code
17121374/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
train_df.dtypes
code
17121374/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
train_df.dtypes
(train_df.isnull().sum() / 1460 * 100).iloc[0:50]
code
17121374/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
(test_df.isnull().sum() / 1460 * 100).iloc[50:82]
code
17121374/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.columns
train_df.dtypes
print(train_df.shape)
train_df.describe()
code
17121374/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sub_df = pd.read_csv('../input/sample_submission.csv')
test_df.head(10)
code
48163942/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
# Note: missing charges_2 values are imputed with the median of charges_1.
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
fig = plt.figure(figsize=(15, 8))
most_freq_category = train.groupby('Product_Category')['Selling_Price'].sum().reset_index()
sns.barplot(x='Product_Category', y='Selling_Price', data=most_freq_category, palette='muted')
code
48163942/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
print(train.columns)
code
48163942/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
sns.distplot(train['Selling_Price'])
code
48163942/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
sns.distplot(train['charges_2 (%)'])
code
48163942/cell_44
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
# Analyze how the selling price depends on the product category.
fig = plt.figure(figsize=(15, 8))
most_freq_category = train.groupby('Product_Category')['Selling_Price'].sum().reset_index()
sns.barplot(x='Product_Category', y='Selling_Price', data=most_freq_category, palette='muted')
train['Loyalty_customer'].value_counts()
code
48163942/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
train.info()
code
48163942/cell_26
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
sns.distplot(train['charges_1'])
code
48163942/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
train['Product_Category'].nunique()
code
48163942/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
test.head()
code
48163942/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train.describe()
code
48163942/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()

def showMissingValues(dataset):
    for col in dataset.columns.tolist():
        print(f' {col} column missing values: {dataset[col].isnull().sum()}')
    print('\n')

print('Train data-------------------------------------')
showMissingValues(train)
print('Validation dataset--------------------------------------')
showMissingValues(test)
code
48163942/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
code
48163942/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
# Analyze how the selling price depends on the product category.
fig = plt.figure(figsize=(15, 8))
most_freq_category = train.groupby('Product_Category')['Selling_Price'].sum().reset_index()
sns.barplot(x='Product_Category', y='Selling_Price', data=most_freq_category, palette='muted')
train['Loyalty_customer'].nunique()
code
48163942/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape
# Analyze how the selling price depends on the product category.
fig = plt.figure(figsize=(15, 8))
most_freq_category = train.groupby('Product_Category')['Selling_Price'].sum().reset_index()
sns.barplot(x='Product_Category', y='Selling_Price', data=most_freq_category, palette='muted')
train['Customer_name'].nunique()
code
48163942/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train.info()
code
48163942/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train[train['Customer_name'] == 'Missing'].head()
code
48163942/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
train.head()
code
48163942/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()

def showMissingValues(dataset):
    pass  # placeholder; redefined below

train = train.rename(columns={'charges_2 (%)': 'charges_2'})
test = test.rename(columns={'charges_2 (%)': 'charges_2'})
train['charges_2'] = train['charges_2'].fillna(train['charges_1'].median())
test['charges_2'] = test['charges_2'].fillna(test['charges_1'].median())
train = train[~train['Selling_Price'].isna()]
train.shape

def showMissingValues(dataset):
    for col in dataset.columns.tolist():
        print(f' {col} column missing values: {dataset[col].isnull().sum()}')
    print('\n')

print('Train data-------------------------------------')
showMissingValues(train)
print('Test dataset--------------------------------------')
showMissingValues(test)
code
48163942/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
sns.set()
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from scipy.stats import norm
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
warnings.filterwarnings('ignore')

train_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/train.csv')
test_original = pd.read_csv('../input/hackerearth-carnival-wars-challenge/test.csv')
sample_submission = pd.read_csv('../input/hackerearth-carnival-wars-challenge/sample_submission.csv')
train = train_original.copy()
test = test_original.copy()
print(f'Train Dataset shape : {train.shape}')
print(f'Test Dataset shape : {test.shape}')
code
48163942/cell_5
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33118743/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

# Collect evaluation tasks whose first train pair has equal input/output shapes.
same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)

def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
    train = []
    test = []
    with open(file_path + name, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape != im_out.shape:
        return None
    for im in all_im['train']:
        im_in = np.array(im['input'])
        im_out = np.array(im['output'])
        # x/x is 1 where input and output differ and NaN (-> 0) where they agree,
        # so `mask` marks exactly the pixels that changed.
        mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), nan=0), 'int8')
        train.append((im_in, im_out, mask))
    if mode == 'train':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            im_out = np.array(im['output'])
            test.append((im_in, im_out))
    if mode == 'test':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            test.append(im_in)
    return (train, test)

train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)

def get_features(input_):
    # For every changed pixel, record its 8-neighborhood colors (-1 outside the
    # grid) as the feature vector and the output pixel's color as the target.
    im_in, im_out, mask = input_
    features = np.zeros((sum(sum(mask)), 8))
    colors = np.zeros(sum(sum(mask)))
    f = 0
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if mask[y, x] == 1:
                pix_exp = np.zeros(8)
                n_p = 0
                for dy in range(-1, 2):
                    for dx in range(-1, 2):
                        if dy != 0 or dx != 0:
                            if dx + x >= 0 and dy + y >= 0 and (dx + x < mask.shape[1]) and (dy + y < mask.shape[0]):
                                pix_exp[n_p] = im_in[y + dy, x + dx]
                            else:
                                pix_exp[n_p] = -1
                            n_p += 1
                features[f] = pix_exp
                colors[f] = im_out[y, x]
                f += 1
    return (features, colors)

def get_cf(train):
    # Deduplicate the neighborhood features and keep one output color per unique feature.
    features_set = []
    colors_set = []
    for in_out_mask in train:
        features, colors = get_features(in_out_mask)
        features_set += list(features)
        colors_set += list(colors)
    features_set_min = np.unique(np.array(features_set), axis=0)
    colors_min = np.zeros(len(features_set_min))
    for n, feature in enumerate(features_set):
        if feature in features_set_min:
            for i, feature_uniq in enumerate(features_set_min):
                if str(feature_uniq) == str(feature):
                    break
            colors_min[i] = colors_set[n]
    return (colors_min, features_set_min)

colors_min, features_set_min = get_cf(train)

def make_pred(im_in, features, colors):
    # Recolor any pixel whose 8-neighborhood matches a stored feature.
    im_out = im_in.copy()
    f = 0
    for y in range(im_in.shape[0]):
        for x in range(im_in.shape[1]):
            pix_exp = np.zeros(8)
            n_p = 0
            for dy in range(-1, 2):
                for dx in range(-1, 2):
                    if dy != 0 or dx != 0:
                        if dx + x >= 0 and dy + y >= 0 and (dx + x < im_in.shape[1]) and (dy + y < im_in.shape[0]):
                            pix_exp[n_p] = im_in[y + dy, x + dx]
                        else:
                            pix_exp[n_p] = -1
                        n_p += 1
            for n, f in enumerate(features):
                if str(f) == str(pix_exp):
                    im_out[y, x] = colors[n]
    return im_out

pred = make_pred(test[0][0], features_set_min, colors_min)
print(test[0][0])
print(pred)
print(test[0][1])
# Count of pixels where prediction and ground truth disagree.
print(np.sum(np.sum(np.where(np.nan_to_num((pred - test[0][1]) / (pred - test[0][1]), nan=0) != 0, 1, 0))))
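# ---- Editor's sketch, not part of the original notebook ----
# Exact-match check in the spirit of the ARC scoring: a prediction only counts
# if every pixel agrees with the ground truth. Uses only names defined above.
exact_match = bool(np.array_equal(pred, test[0][1]))
print('exact match with ground truth:', exact_match)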
code
33118743/cell_4
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import json
import numpy as np  # linear algebra
import os

train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'

same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
    with open(evaluation_path + ex, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape == im_out.shape:
        same_shape.append(ex)

def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
    train = []
    test = []
    with open(file_path + name, 'r') as train_file:
        all_im = json.load(train_file)
    im_in = np.array(all_im['train'][0]['input'])
    im_out = np.array(all_im['train'][0]['output'])
    if im_in.shape != im_out.shape:
        return None
    for im in all_im['train']:
        im_in = np.array(im['input'])
        im_out = np.array(im['output'])
        mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), nan=0), 'int8')
        train.append((im_in, im_out, mask))
        if show:
            print('NAME:\n', name)
            print('IN:\n', im_in)
            print('OUT:\n', im_out)
            print('MASK:\n', mask)
    if mode == 'train':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            im_out = np.array(im['output'])
            test.append((im_in, im_out))
    if mode == 'test':
        for im in all_im['test']:
            im_in = np.array(im['input'])
            test.append(im_in)
    return (train, test)

train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)
code