path: stringlengths (13-17)
screenshot_names: sequencelengths (1-873)
code: stringlengths (0-40.4k)
cell_type: stringclasses (1 value)
72083691/cell_35
[ "text_html_output_1.png" ]
from mlxtend.frequent_patterns import apriori, association_rules, fpgrowth, fpmax
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
import time

data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape

# Each (member, date) group of purchases forms one transaction.
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]

# One-hot encode the transactions into a boolean item matrix.
trans_encoder = TransactionEncoder()
trans_encoder_matrix = trans_encoder.fit(all_transactions).transform(all_transactions)
trans_encoder_matrix = pd.DataFrame(trans_encoder_matrix, columns=trans_encoder.columns_)

def perform_rule_calculation(transact_items_matrix, rule_type, min_support=0.001):
    """Run the chosen frequent-itemset algorithm and measure its execution time."""
    start_time = 0
    total_execution = 0
    if rule_type == 'fpmax':
        start_time = time.time()
        rule_items = fpmax(transact_items_matrix, min_support=min_support, use_colnames=True)
        total_execution = time.time() - start_time
    if rule_type == 'apriori':
        start_time = time.time()
        rule_items = apriori(transact_items_matrix, min_support=min_support, use_colnames=True)
        total_execution = time.time() - start_time
    if rule_type == 'fpgrowth':
        start_time = time.time()
        rule_items = fpgrowth(transact_items_matrix, min_support=min_support, use_colnames=True)
        total_execution = time.time() - start_time
    rule_items['number_of_items'] = rule_items['itemsets'].apply(lambda x: len(x))
    return (rule_items, total_execution)

apriori_matrix, apriori_exec_time = perform_rule_calculation(trans_encoder_matrix, rule_type='apriori')
print('Apriori Execution took: {} seconds'.format(apriori_exec_time))
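# Usage sketch (hypothetical, not in the original cell): the same helper can time
# FP-Growth on the identical encoded matrix for comparison with Apriori.
fpgrowth_matrix, fpgrowth_exec_time = perform_rule_calculation(trans_encoder_matrix, rule_type='fpgrowth')
print('FP-Growth Execution took: {} seconds'.format(fpgrowth_exec_time))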
code
72083691/cell_43
[ "image_output_1.png" ]
from mlxtend.frequent_patterns import apriori, association_rules, fpgrowth, fpmax

def compute_association_rule(rule_matrix, metric='lift', min_thresh=1):
    """
    Compute the final association rules.

    rule_matrix: frequent-itemset matrix produced by the chosen algorithm
    metric: the metric to be used (default is lift)
    min_thresh: the minimum threshold (default is 1)

    Returns:
        rules: information for each transaction satisfying the given metric & threshold
    """
    rules = association_rules(rule_matrix, metric=metric, min_threshold=min_thresh)
    return rules

apripri_rule = compute_association_rule(apriori_matrix, metric='confidence', min_thresh=0.2)
apripri_rule.head()
code
72083691/cell_31
[ "text_html_output_1.png" ]
from mlxtend.frequent_patterns import apriori, association_rules, fpgrowth, fpmax

def compute_association_rule(rule_matrix, metric='lift', min_thresh=1):
    """
    Compute the final association rules.

    rule_matrix: frequent-itemset matrix produced by the chosen algorithm
    metric: the metric to be used (default is lift)
    min_thresh: the minimum threshold (default is 1)

    Returns:
        rules: information for each transaction satisfying the given metric & threshold
    """
    rules = association_rules(rule_matrix, metric=metric, min_threshold=min_thresh)
    return rules

fp_growth_rule_lift = compute_association_rule(fpgrowth_matrix)
fp_growth_rule_lift.head()
code
72083691/cell_46
[ "text_plain_output_1.png" ]
from mlxtend.frequent_patterns import apriori, association_rules, fpgrowth, fpmax
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd

data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
trans_encoder = TransactionEncoder()
trans_encoder_matrix = trans_encoder.fit(all_transactions).transform(all_transactions)
trans_encoder_matrix = pd.DataFrame(trans_encoder_matrix, columns=trans_encoder.columns_)

# fpmax keeps only maximal frequent itemsets (those with no frequent superset).
fmax = fpmax(trans_encoder_matrix, min_support=0.01, use_colnames=True)
fmax.head()
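# Usage sketch (hypothetical, assumes fmax above): mlxtend stores each maximal
# itemset as a frozenset, so the result can be filtered by itemset length.
fmax[fmax['itemsets'].apply(len) >= 2].head()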
code
72083691/cell_24
[ "text_html_output_1.png" ]
val = {'name': 12}
value = list(val.items())[0]
value
code
72083691/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
len(all_transactions)
code
72083691/cell_27
[ "text_plain_output_1.png" ]
fpgrowth_matrix.head()
code
72083691/cell_37
[ "text_html_output_1.png" ]
apriori_matrix.tail()
code
72083691/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
data.head()
code
72083691/cell_36
[ "text_plain_output_1.png" ]
apriori_matrix.head()
code
106212906/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
total_restaurent = df_new['Area'].value_counts()
total_restaurent
code
106212906/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
category = df_new['Category'].value_counts(ascending=False)
category
code
106212906/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new.describe()
code
106212906/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new['Cost for Two (in Rupees)'].unique()
code
106212906/cell_30
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df1 = df_new['Category'].value_counts()
df1
df2 = df1.iloc[1:]
df2
code
106212906/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df1 = df_new['Category'].value_counts()
df1
df2 = df1.iloc[1:]
df2
df2.to_csv('category_type')
df3 = pd.read_csv('category_type')
df3.columns = ['Category', 'Count']
df3
code
106212906/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new['Area'].unique()
code
106212906/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df1 = df_new['Category'].value_counts()
df1
code
106212906/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new.head()
code
106212906/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.head()
code
106212906/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new.head()
code
106212906/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df.head()
code
106212906/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
code
106212906/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df1 = df_new['Category'].value_counts()
df1
df2 = df1.iloc[1:]
df2
df2.to_csv('category_type')
df3 = pd.read_csv('category_type')
df3
code
106212906/cell_28
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new.head()
code
106212906/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df.info()
code
106212906/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
category = df_new['Category'].value_counts(ascending=False)
category
category_less_then_100 = category[category < 100]
category_less_then_100

def handle_categgory(value):
    # `in` on a Series tests the index, i.e. the category names here.
    if value in category_less_then_100:
        return 'Others'
    else:
        return value

df_new['Category'] = df_new['Category'].apply(handle_categgory)
df_new['Category'].value_counts()
code
106212906/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from IPython.display import Image

Image('../input/swiggy/swiggy.jpg', width=750)
code
106212906/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
code
106212906/cell_35
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
plt.xticks(rotation=90)
df1 = df_new['Category'].value_counts()
df1
df2 = df1.iloc[1:]
df2
df2.to_csv('category_type')
df3 = pd.read_csv('category_type')
df3.columns = ['Category', 'Count']
df3
sns.set_theme()
plt.figure(figsize=(10, 5))
sns.barplot(data=df3, x='Category', y='Count')
plt.xticks(rotation=45)
code
106212906/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new['Cost for Two (in Rupees)'].value_counts()
code
106212906/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
category = df_new['Category'].value_counts(ascending=False)
category
category_less_then_100 = category[category < 100]
category_less_then_100
code
106212906/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
df_new.head()
code
106212906/cell_10
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df.head()
code
106212906/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new.shape
df_new.drop_duplicates(inplace=True)
df_new.shape
df_new = df_new.dropna(subset=['Area'])  # drop rows with a missing Area
plt.figure(figsize=(15, 9))
sns.countplot(df_new['Area']).set(title='Number of Restaurants')
plt.xticks(rotation=90)
code
106212906/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/swiggy-bangalore/Swiggy Bangalore.csv')
df = df.drop(['Offer'], axis=1)
df_new = df.drop(['Rating', 'URL'], axis=1)
df_new['Category'].nunique()
code
32065944/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
code
32065944/cell_34
[ "text_plain_output_1.png" ]
import gc

del clustermap
del distance_matrix
del distances
del Z
gc.collect()
code
32065944/cell_20
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tfidf_vectorizer = TfidfVectorizer(input='content', lowercase=False, preprocessor=lambda text: text,
                                   tokenizer=lambda text: text.split(' '), token_pattern=None,
                                   analyzer='word', stop_words=None, ngram_range=(1, 1),
                                   max_features=10000, binary=False, norm='l2', use_idf=True,
                                   smooth_idf=True, sublinear_tf=False)
features = tfidf_vectorizer.fit_transform(df['text_simplified'])
features.shape
features = features.astype('float32').toarray()
sample_size = 0.1
sample_mask = np.random.choice(a=[True, False], size=len(features), p=[sample_size, 1 - sample_size])
features_sample = features[sample_mask]
features_sample.shape
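# Optional sketch (an assumption, not part of the original cell): seeding NumPy
# before building the random mask makes the ~10% document sample reproducible.
np.random.seed(42)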
code
32065944/cell_40
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.8].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.95].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

cluster_count = df['cluster'].value_counts().sort_values()
ax = cluster_count.plot(kind='bar', figsize=(15, 5))
ax.set_xticks([])
ax.set_xlabel('Cluster id')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_29
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.8].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.95].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_2
[ "text_plain_output_1.png" ]
!pip install fastcluster
code
32065944/cell_28
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.8].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_16
[ "image_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tfidf_vectorizer = TfidfVectorizer(input='content', lowercase=False, preprocessor=lambda text: text,
                                   tokenizer=lambda text: text.split(' '), token_pattern=None,
                                   analyzer='word', stop_words=None, ngram_range=(1, 1),
                                   max_features=10000, binary=False, norm='l2', use_idf=True,
                                   smooth_idf=True, sublinear_tf=False)
features = tfidf_vectorizer.fit_transform(df['text_simplified'])
features.shape
code
32065944/cell_43
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.8].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

ax = dissimilarities[dissimilarities >= 0.95].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

cluster_count = df['cluster'].value_counts().sort_values()
ax = cluster_count.plot(kind='bar', figsize=(15, 5))
ax.set_xticks([])
ax.set_xlabel('Cluster id')
ax.set_ylabel('Count')
ax.grid(True)

# Relabel clusters with at most five members as noise (-1) and re-plot.
noise_clusters = set(cluster_count[cluster_count <= 5].index)
noise_mask = df['cluster'].isin(noise_clusters)
df.loc[noise_mask, 'cluster'] = -1
cluster_count = df['cluster'].value_counts().sort_values()
ax = cluster_count.plot(kind='bar', figsize=(15, 5))
ax.set_xticks([])
ax.set_xlabel('Cluster id')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from itertools import chain
from scipy.cluster.hierarchy import fcluster
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count
tfidf_vectorizer = TfidfVectorizer(input='content', lowercase=False, preprocessor=lambda text: text,
                                   tokenizer=lambda text: text.split(' '), token_pattern=None,
                                   analyzer='word', stop_words=None, ngram_range=(1, 1),
                                   max_features=10000, binary=False, norm='l2', use_idf=True,
                                   smooth_idf=True, sublinear_tf=False)
features = tfidf_vectorizer.fit_transform(df['text_simplified'])
features.shape
features = features.astype('float32').toarray()
sample_size = 0.1
sample_mask = np.random.choice(a=[True, False], size=len(features), p=[sample_size, 1 - sample_size])
features_sample = features[sample_mask]
features_sample.shape
sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
dissimilarities = pd.Series(distance_matrix.flatten())
clusters = fcluster(Z, t=0.999, criterion='distance')
clustermap = sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
cluster_mapping = dict(zip(range(len(features_sample)), clusters))
clustermap_clusters = pd.Series([cluster_mapping[id_] for id_ in list(clustermap.data2d.columns)])
for cluster in set(clusters):
    cluster_range = list(clustermap_clusters[clustermap_clusters == cluster].index)
    clustermap.ax_heatmap.add_patch(patches.Rectangle(xy=(np.min(cluster_range), np.min(cluster_range)),
                                                      width=len(cluster_range), height=len(cluster_range),
                                                      fill=False, edgecolor='lightgreen', lw=2))
print(f'There are {clustermap_clusters.nunique()} clusters.')
code
32065944/cell_46
[ "text_html_output_1.png" ]
from itertools import chain
from scipy.cluster.hierarchy import fcluster
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)
ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

tfidf_vectorizer = TfidfVectorizer(input='content', lowercase=False, preprocessor=lambda text: text,
                                   tokenizer=lambda text: text.split(' '), token_pattern=None,
                                   analyzer='word', stop_words=None, ngram_range=(1, 1),
                                   max_features=10000, binary=False, norm='l2', use_idf=True,
                                   smooth_idf=True, sublinear_tf=False)
features = tfidf_vectorizer.fit_transform(df['text_simplified'])
features.shape
features = features.astype('float32').toarray()
sample_size = 0.1
sample_mask = np.random.choice(a=[True, False], size=len(features), p=[sample_size, 1 - sample_size])
features_sample = features[sample_mask]
features_sample.shape

sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)
ax = dissimilarities[dissimilarities >= 0.8].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)
ax = dissimilarities[dissimilarities >= 0.95].hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)

clusters = fcluster(Z, t=0.999, criterion='distance')
clustermap = sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
cluster_mapping = dict(zip(range(len(features_sample)), clusters))
clustermap_clusters = pd.Series([cluster_mapping[id_] for id_ in list(clustermap.data2d.columns)])
for cluster in set(clusters):
    cluster_range = list(clustermap_clusters[clustermap_clusters == cluster].index)
    clustermap.ax_heatmap.add_patch(patches.Rectangle(xy=(np.min(cluster_range), np.min(cluster_range)),
                                                      width=len(cluster_range), height=len(cluster_range),
                                                      fill=False, edgecolor='lightgreen', lw=2))

cluster_count = df['cluster'].value_counts().sort_values()
ax = cluster_count.plot(kind='bar', figsize=(15, 5))
ax.set_xticks([])
ax.set_xlabel('Cluster id')
ax.set_ylabel('Count')
ax.grid(True)
noise_clusters = set(cluster_count[cluster_count <= 5].index)
noise_mask = df['cluster'].isin(noise_clusters)
df.loc[noise_mask, 'cluster'] = -1

columns = np.array(tfidf_vectorizer.get_feature_names())
top_k = 3

def describe(df: pd.DataFrame) -> pd.DataFrame:
    # Attach the top-k mean-TF-IDF words of each cluster as new columns.
    order = features[df.index].mean(axis=0).argsort()[::-1][:top_k]
    top_words = columns[order]
    cluster_id = df['cluster'].iloc[0]
    for i, word in enumerate(top_words):
        df[f'word_{i + 1}'] = word if cluster_id != -1 else ''
    return df

df = df.groupby('cluster').apply(describe)
df.filter(regex='text_simplified|word_\\d+', axis=1)
code
32065944/cell_24
[ "text_plain_output_1.png" ]
import fastcluster
from scipy.spatial.distance import squareform

# Convert the square distance matrix to the condensed vector form linkage expects.
distances = squareform(distance_matrix, force='tovector')
Z = fastcluster.linkage(distances, method='complete', preserve_input=True)
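# Usage sketch (mirrors a later cell in this notebook): cut the linkage into
# flat clusters at a chosen cosine-distance threshold.
from scipy.cluster.hierarchy import fcluster
clusters = fcluster(Z, t=0.999, criterion='distance')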
code
32065944/cell_14
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import pairwise_distances

# Pairwise cosine dissimilarities between the sampled TF-IDF vectors.
distance_matrix = pairwise_distances(features_sample, metric='cosine')
code
32065944/cell_10
[ "text_plain_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count
code
32065944/cell_27
[ "image_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

ax = tokens_count[10000:].plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)

dissimilarities = pd.Series(distance_matrix.flatten())
ax = dissimilarities.hist(bins=100, figsize=(15, 5))
ax.set_xlabel('Cosine dissimilarity')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_12
[ "text_plain_output_1.png" ]
from itertools import chain
import pandas as pd

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count

ax = tokens_count.plot(figsize=(15, 5))
ax.set_xlabel('Token')
ax.set_ylabel('Count')
ax.grid(True)
code
32065944/cell_36
[ "text_plain_output_1.png" ]
from itertools import chain
from scipy.cluster.hierarchy import fcluster
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
df = df.dropna(subset=['text_simplified']).reset_index(drop=True)
tokens = df['text_simplified'].str.split(' ').tolist()
tokens = pd.Series(chain(*tokens))
tokens_count = tokens.value_counts()
tokens_count
tfidf_vectorizer = TfidfVectorizer(input='content', lowercase=False, preprocessor=lambda text: text,
                                   tokenizer=lambda text: text.split(' '), token_pattern=None,
                                   analyzer='word', stop_words=None, ngram_range=(1, 1),
                                   max_features=10000, binary=False, norm='l2', use_idf=True,
                                   smooth_idf=True, sublinear_tf=False)
features = tfidf_vectorizer.fit_transform(df['text_simplified'])
features.shape
features = features.astype('float32').toarray()
sample_size = 0.1
sample_mask = np.random.choice(a=[True, False], size=len(features), p=[sample_size, 1 - sample_size])
features_sample = features[sample_mask]
features_sample.shape
sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
dissimilarities = pd.Series(distance_matrix.flatten())
clusters = fcluster(Z, t=0.999, criterion='distance')
clustermap = sns.clustermap(data=distance_matrix, col_linkage=Z, row_linkage=Z, cmap=plt.get_cmap('RdBu'))
cluster_mapping = dict(zip(range(len(features_sample)), clusters))
clustermap_clusters = pd.Series([cluster_mapping[id_] for id_ in list(clustermap.data2d.columns)])
for cluster in set(clusters):
    cluster_range = list(clustermap_clusters[clustermap_clusters == cluster].index)
    clustermap.ax_heatmap.add_patch(patches.Rectangle(xy=(np.min(cluster_range), np.min(cluster_range)),
                                                      width=len(cluster_range), height=len(cluster_range),
                                                      fill=False, edgecolor='lightgreen', lw=2))

# k-NN fitted on the sampled vectors, used to propagate cluster labels by cosine distance.
model = KNeighborsClassifier(n_neighbors=5, metric='cosine', n_jobs=-1)
model.fit(features_sample, clusters)
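# Hypothetical usage sketch: assign every document to the nearest sampled
# cluster with the fitted k-NN (later cells assume a df['cluster'] column exists).
df['cluster'] = model.predict(features)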
code
90111983/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
print('Cases of nonconformity by gender: {}'.format(sum(df['total'] - df['male'] - df['female'])))
code
90111983/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
df.head()
code
90111983/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']

plt.figure(figsize=(12, 5))
plt.title("Characteristics of China's population over the period of 70 years", fontweight='bold', fontsize=12)
plt.plot(df['year'], df['female'], linewidth=3, label='female')
plt.plot(df['year'], df['male'], linewidth=3, label='male')
plt.plot(df['year'], df['urban'], linewidth=3, linestyle='--', label='urban')
plt.plot(df['year'], df['rural'], linewidth=3, linestyle='--', label='rural')
plt.xlabel('year', fontweight='bold', fontsize=10)
plt.ylabel('population', fontweight='bold', fontsize=10)
plt.grid(axis='x', color='0.95')
plt.legend(title='Features:')
plt.show()
code
90111983/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']
df.describe()
code
90111983/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']

fig, ax1 = plt.subplots(figsize=(12, 5))
plt.title('The difference between the genders in total and percentage terms', fontweight='bold', fontsize=12)
ax1.set_xlabel('year', fontsize=10, fontweight='bold')
ax1.set_ylabel('total', fontweight='bold', fontsize=10, color='green')
plt.plot(df['year'], df['male'] - df['female'], linewidth=3, label='total', color='green')
ax1.tick_params(axis='y')
ax2 = ax1.twinx()
ax2.set_ylabel('percent', fontweight='bold', fontsize=10)
plt.plot(df['year'], (df['male'] - df['female']) / df['total'] * 100, linewidth=3, color='black', label='percent')
ax2.tick_params(axis='y')
ax2.yaxis.set_major_formatter(mtick.PercentFormatter())
fig.tight_layout()

fig, ax1 = plt.subplots(figsize=(12, 5))
plt.title('Changing of population growth', fontweight='bold', fontsize=12)
ax1.set_xlabel('year', fontsize=10, fontweight='bold')
ax1.set_ylabel('total number', fontweight='bold', fontsize=10, color='green')
plt.plot(df['year'], df['total'], linewidth=3, label='total', color='green')
ax1.tick_params(axis='y')
ax2 = ax1.twinx()
ax2.set_ylabel('total growth', fontweight='bold', fontsize=10)
plt.plot(df['year'], df['total'] - df['total'].shift(), linewidth=3, color='black', label='percent')
ax2.tick_params(axis='y')
plt.axhline(y=0, color='red', linestyle='--')
fig.tight_layout()
code
90111983/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']

fig, ax1 = plt.subplots(figsize=(12, 5))
plt.title('The difference between the genders in total and percentage terms', fontweight='bold', fontsize=12)
ax1.set_xlabel('year', fontsize=10, fontweight='bold')
ax1.set_ylabel('total', fontweight='bold', fontsize=10, color='green')
plt.plot(df['year'], df['male'] - df['female'], linewidth=3, label='total', color='green')
ax1.tick_params(axis='y')
ax2 = ax1.twinx()
ax2.set_ylabel('percent', fontweight='bold', fontsize=10)
plt.plot(df['year'], (df['male'] - df['female']) / df['total'] * 100, linewidth=3, color='black', label='percent')
ax2.tick_params(axis='y')
ax2.yaxis.set_major_formatter(mtick.PercentFormatter())
fig.tight_layout()
code
90111983/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
df.info()
code
90111983/cell_7
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.head()
code
90111983/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']
print('Cases of nonconformity by territory: {}'.format(sum(df['total'] - df['urban'] - df['rural'])))
code
90111983/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
code
90111983/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']
print('Cases of nonconformity by gender: {}'.format(sum(df['total'] - df['male'] - df['female'])))
code
1006102/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/database.csv', low_memory=False)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(13, 15))
crims_by_relationship = data[['Relationship', 'Record ID']].groupby('Relationship').count()
crimes_per_perpetrator_race = data[['Perpetrator Race', 'Record ID']].groupby('Perpetrator Race').count()
crimes_per_victime_race = data[['Victim Race', 'Record ID']].groupby('Victim Race').count()
crimes_per_type = data[['Crime Type', 'Record ID']].groupby('Crime Type').count()
crimes_per_perpetrator_race.plot(kind='bar', ax=ax1, title='crimes per perpetrator race')
crimes_per_victime_race.plot(kind='bar', ax=ax2, title='crimes per victim race')
crims_by_relationship.plot(kind='bar', ax=ax3, title='crimes by relationship')
crimes_per_type.plot(kind='bar', ax=ax4, title='crimes types')
data1 = data[['Relationship', 'Year', 'Record ID']].groupby(['Relationship', 'Year']).count().reset_index()
plt.plot(data1[data1.Relationship == 'Wife']['Year'].tolist(), data1[data1.Relationship == 'Wife']['Record ID'].tolist(),
         data1[data1.Relationship == 'Acquaintance']['Year'].tolist(), data1[data1.Relationship == 'Acquaintance']['Record ID'].tolist())
plt.show()
code
1006102/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time

import seaborn as sns
import sklearn.cluster as cluster

data = pd.read_csv('../input/database.csv', low_memory=False)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(13, 15))
crims_by_relationship = data[['Relationship', 'Record ID']].groupby('Relationship').count()
crimes_per_perpetrator_race = data[['Perpetrator Race', 'Record ID']].groupby('Perpetrator Race').count()
crimes_per_victime_race = data[['Victim Race', 'Record ID']].groupby('Victim Race').count()
crimes_per_type = data[['Crime Type', 'Record ID']].groupby('Crime Type').count()
crimes_per_perpetrator_race.plot(kind='bar', ax=ax1, title='crimes per perpetrator race')
crimes_per_victime_race.plot(kind='bar', ax=ax2, title='crimes per victim race')
crims_by_relationship.plot(kind='bar', ax=ax3, title='crimes by relationship')
crimes_per_type.plot(kind='bar', ax=ax4, title='crimes types')
data1 = data[['Relationship', 'Year', 'Record ID']].groupby(['Relationship', 'Year']).count().reset_index()
victims = data[['Victim Sex', 'Victim Age', 'Victim Ethnicity', 'Victim Race']]

plot_kwds = {'alpha': 0.25, 's': 80, 'linewidths': 0}  # assumed scatter styling for plot_clusters

def plot_clusters(data, algorithm, args, kwds):
    start_time = time.time()
    labels = algorithm(*args, **kwds).fit_predict(data)
    end_time = time.time()
    palette = sns.color_palette('deep', np.unique(labels).max() + 1)
    colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]
    plt.scatter(data.T[0], data.T[1], c=colors, **plot_kwds)
    frame = plt.gca()
    frame.axes.get_xaxis().set_visible(False)
    frame.axes.get_yaxis().set_visible(False)
    plt.title('Clusters found by {}'.format(str(algorithm.__name__)), fontsize=24)
    plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)

# Note: KMeans needs numeric features; the categorical victim columns would have
# to be encoded first, which is why this cell errors as written.
plot_clusters(victims, cluster.KMeans, (), {'n_clusters': 6})
code
1006102/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/database.csv', low_memory=False)
data.head()
code
1006102/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1006102/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/database.csv', low_memory=False)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(13, 15))
crims_by_relationship = data[['Relationship', 'Record ID']].groupby('Relationship').count()
crimes_per_perpetrator_race = data[['Perpetrator Race', 'Record ID']].groupby('Perpetrator Race').count()
crimes_per_victime_race = data[['Victim Race', 'Record ID']].groupby('Victim Race').count()
crimes_per_type = data[['Crime Type', 'Record ID']].groupby('Crime Type').count()
crimes_per_perpetrator_race.plot(kind='bar', ax=ax1, title='crimes per perpetrator race')
crimes_per_victime_race.plot(kind='bar', ax=ax2, title='crimes per victim race')
crims_by_relationship.plot(kind='bar', ax=ax3, title='crimes by relationship')
crimes_per_type.plot(kind='bar', ax=ax4, title='crimes types')
code
1006102/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/database.csv', low_memory=False)
victims = data[['Victim Sex', 'Victim Age', 'Victim Ethnicity', 'Victim Race']]
victims.head()
code
89138671/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
y = data.pop('target')
X = data.drop(columns=['row_id'], errors='ignore')  # 'row_id' may already have been dropped upstream
code
89138671/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_pickle('../input/ump-train-picklefile/train.pkl')
data.drop(columns=['row_id'], inplace=True)
code
89138671/cell_5
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_20.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
import numpy as np
import pandas as pd

# Plot the target series over time for 20 randomly chosen investments.
for investment in np.random.choice(pd.unique(data['investment_id']), 20):
    data[data['investment_id'] == investment].plot('time_id', 'target')
code
34121141/cell_9
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_it = train_datagen.flow_from_directory('/kaggle/input/dataset/train/', class_mode='categorical', batch_size=10, target_size=(224, 224))
test_it = test_datagen.flow_from_directory('/kaggle/input/dataset/test/', class_mode='categorical', batch_size=5, target_size=(224, 224))
train_it.class_indices
code
34121141/cell_7
[ "text_plain_output_1.png" ]
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, BatchNormalization, Activation
from keras.layers import Flatten
from keras.models import Model, Sequential
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
import sys

def define_model():
    model = VGG16(include_top=False, input_shape=(224, 224, 3))
    for layer in model.layers:
        layer.trainable = False
    flat1 = Flatten()(model.layers[-1].output)
    class1 = Dense(256, activation='relu', kernel_initializer='he_uniform')(flat1)
    output = Dense(166, activation='softmax')(class1)
    model = Model(inputs=model.inputs, outputs=output)
    opt = SGD(lr=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

def summarize_diagnostics(history):
    filename = sys.argv[0].split('/')[-1]
    pyplot.close()

train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_it = train_datagen.flow_from_directory('/kaggle/input/dataset/train/', class_mode='categorical', batch_size=10, target_size=(224, 224))
test_it = test_datagen.flow_from_directory('/kaggle/input/dataset/test/', class_mode='categorical', batch_size=5, target_size=(224, 224))
model = define_model()
model.summary()
history = model.fit_generator(train_it, steps_per_epoch=len(train_it), validation_data=test_it, validation_steps=len(test_it), epochs=20, verbose=1)
_, acc = model.evaluate_generator(test_it, steps=len(test_it), verbose=0)
print('> %.3f' % (acc * 100.0))
model.save('accuracy - %.3f' % (acc * 100.0) + '.h5')
summarize_diagnostics(history)
code
34121141/cell_8
[ "text_plain_output_1.png" ]
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, BatchNormalization, Activation
from keras.layers import Flatten
from keras.models import Model, Sequential
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
import cv2
import numpy as np  # linear algebra
import sys

def define_model():
    model = VGG16(include_top=False, input_shape=(224, 224, 3))
    for layer in model.layers:
        layer.trainable = False
    flat1 = Flatten()(model.layers[-1].output)
    class1 = Dense(256, activation='relu', kernel_initializer='he_uniform')(flat1)
    output = Dense(166, activation='softmax')(class1)
    model = Model(inputs=model.inputs, outputs=output)
    opt = SGD(lr=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

def summarize_diagnostics(history):
    filename = sys.argv[0].split('/')[-1]
    pyplot.close()

train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_it = train_datagen.flow_from_directory('/kaggle/input/dataset/train/', class_mode='categorical', batch_size=10, target_size=(224, 224))
test_it = test_datagen.flow_from_directory('/kaggle/input/dataset/test/', class_mode='categorical', batch_size=5, target_size=(224, 224))
model = define_model()
model.summary()
history = model.fit_generator(train_it, steps_per_epoch=len(train_it), validation_data=test_it, validation_steps=len(test_it), epochs=20, verbose=1)
_, acc = model.evaluate_generator(test_it, steps=len(test_it), verbose=0)
model.save('accuracy - %.3f' % (acc * 100.0) + '.h5')
summarize_diagnostics(history)

img = cv2.imread('/kaggle/input/dataset/test/Voltorb/Voltorb.14.png')
img = cv2.resize(img, (224, 224))  # the model expects 224x224 inputs
pred = model.predict(np.array([img]) / 255.0)  # apply the same 1/255 rescaling as the generators
move_code = np.argmax(pred[0])
move_code
code
128012372/cell_21
[ "image_output_1.png" ]
from keras import regularizers
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import datetime
import os
import pandas as pd
import tensorflow as tf

tf.config.list_physical_devices('GPU')
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df

train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')

train_datagen = ImageDataGenerator(rescale=1.0 / 255, zoom_range=0.4, horizontal_flip=True)
training_set = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(test_dir, batch_size=15, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
training_set.class_indices

def get_model(input_size, classes=2):
    model = tf.keras.models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_size))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(2, 2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

model_3 = get_model((row, col, 1), classes)
model_3.summary()

chk_path = 'model_3.h5'
log_dir = 'checkpoint/logs/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
checkpoint = ModelCheckpoint(filepath=chk_path, save_best_only=True, verbose=1, mode='min', monitor='val_loss')
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=6, verbose=1, min_delta=0.0001)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
csv_logger = CSVLogger('training.log')
callbacks = [checkpoint, reduce_lr, csv_logger]

steps_per_epoch = training_set.n // training_set.batch_size
validation_steps = test_set.n // test_set.batch_size
hist = model_3.fit(x=training_set, validation_data=test_set, epochs=60, callbacks=callbacks, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps)
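# Hypothetical usage sketch (not in the original cell): reload the best weights
# written by the ModelCheckpoint callback above.
from tensorflow.keras.models import load_model
best_model = load_model(chk_path)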
code
128012372/cell_13
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import os
import pandas as pd

train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df

train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
train_datagen = ImageDataGenerator(rescale=1.0 / 255, zoom_range=0.4, horizontal_flip=True)
training_set = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(test_dir, batch_size=15, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
training_set.class_indices
code
128012372/cell_9
[ "text_plain_output_1.png" ]
import os
import pandas as pd

train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df

train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
test_count.transpose().plot(kind='bar')
code
128012372/cell_25
[ "text_plain_output_1.png" ]
from keras import regularizers
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import datetime
import os
import pandas as pd
import tensorflow as tf

tf.config.list_physical_devices('GPU')
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df

train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')

train_datagen = ImageDataGenerator(rescale=1.0 / 255, zoom_range=0.4, horizontal_flip=True)
training_set = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(test_dir, batch_size=15, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
training_set.class_indices

def get_model(input_size, classes=2):
    model = tf.keras.models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_size))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(2, 2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

model_3 = get_model((row, col, 1), classes)
model_3.summary()

chk_path = 'model_3.h5'
log_dir = 'checkpoint/logs/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
checkpoint = ModelCheckpoint(filepath=chk_path, save_best_only=True, verbose=1, mode='min', monitor='val_loss')
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=6, verbose=1, min_delta=0.0001)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
csv_logger = CSVLogger('training.log')
callbacks = [checkpoint, reduce_lr, csv_logger]

steps_per_epoch = training_set.n // training_set.batch_size
validation_steps = test_set.n // test_set.batch_size
hist = model_3.fit(x=training_set, validation_data=test_set, epochs=60, callbacks=callbacks, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps)

train_loss, train_accu = model_3.evaluate(training_set)
test_loss, test_accu = model_3.evaluate(test_set)
print('final train accuracy = {:.2f} , validation accuracy = {:.2f}'.format(train_accu * 100, test_accu * 100))
code
128012372/cell_23
[ "text_plain_output_1.png" ]
from keras import regularizers
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import datetime
import matplotlib.pyplot as plt
import os
import pandas as pd
import tensorflow as tf
tf.config.list_physical_devices('GPU')
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

# Count the images available for each expression class in a split.
def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
i = 1
for expression in os.listdir(train_dir):
    img = load_img(train_dir + expression + '/' + os.listdir(train_dir + expression)[1])
    plt.axis('off')
    i += 1
# Augmented training pipeline and rescale-only test pipeline.
train_datagen = ImageDataGenerator(rescale=1.0 / 255, zoom_range=0.4, horizontal_flip=True)
training_set = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(test_dir, batch_size=15, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
training_set.class_indices

# Four-block CNN with batch norm, dropout, and L2 regularization on the deeper convolutions.
def get_model(input_size, classes=2):
    model = tf.keras.models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_size))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(2, 2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
model_3 = get_model((row, col, 1), classes)
model_3.summary()
chk_path = 'model_3.h5'
log_dir = 'checkpoint/logs/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
checkpoint = ModelCheckpoint(filepath=chk_path, save_best_only=True, verbose=1, mode='min', monitor='val_loss')
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=6, verbose=1, min_delta=0.0001)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
csv_logger = CSVLogger('training.log')
# earlystop and tensorboard_callback are defined above but not included in this list.
callbacks = [checkpoint, reduce_lr, csv_logger]
steps_per_epoch = training_set.n // training_set.batch_size
validation_steps = test_set.n // test_set.batch_size
hist = model_3.fit(x=training_set, validation_data=test_set, epochs=60, callbacks=callbacks, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps)
# Learning curves: accuracy and loss for train vs. validation.
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model 1 Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.subplot(1, 2, 2)
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model 1 Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
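# Illustrative sketch (added; not part of the original notebook cell): reload
# the best checkpoint written by ModelCheckpoint above and score it on the
# held-out generator. load_model and evaluate are standard tf.keras APIs;
# 'model_3.h5' is the chk_path defined in this cell.
best_model = tf.keras.models.load_model('model_3.h5')
best_model.evaluate(test_set, steps=validation_steps)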
code
128012372/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import os
import pandas as pd
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
print(train_count)
print(test_count)
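# Illustrative sketch (added; not in the original cell): the two per-split
# counts can be stacked into one table with the standard pandas concat API.
print(pd.concat([train_count, test_count]))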
code
128012372/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import regularizers
from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
import os
import pandas as pd
import tensorflow as tf
tf.config.list_physical_devices('GPU')
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')

def get_model(input_size, classes=2):
    model = tf.keras.models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_size))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(2, 2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
model_3 = get_model((row, col, 1), classes)
model_3.summary()
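# Illustrative sketch (added; not in the original cell): count_params() is the
# standard Keras API behind the totals that summary() prints.
print(model_3.count_params())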
code
128012372/cell_8
[ "image_output_1.png" ]
import os
import pandas as pd
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
train_count.transpose().plot(kind='bar')
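# Illustrative sketch (added; not in the original cell): the same bar chart
# with both splits side by side, via pandas' standard plotting API.
pd.concat([train_count, test_count]).transpose().plot(kind='bar')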
code
128012372/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from keras import regularizers
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau
import datetime
import matplotlib.pyplot as plt
from tensorflow.keras.utils import plot_model
code
128012372/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import pandas as pd
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
print(classes)
code
128012372/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import matplotlib.pyplot as plt
import os
import pandas as pd
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
plt.figure(figsize=(14, 22))
i = 1
for expression in os.listdir(train_dir):
    img = load_img(train_dir + expression + '/' + os.listdir(train_dir + expression)[1])
    plt.subplot(1, 7, i)
    plt.imshow(img)
    plt.title(expression)
    plt.axis('off')
    i += 1
plt.show()
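# Illustrative sketch (added; not in the original cell): load_img returns a
# PIL image, so the raw pixel dimensions of the last loaded sample can be
# checked directly via the standard PIL size attribute.
print(img.size)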
code
128012372/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
import os
import pandas as pd
train_dir = '/kaggle/input/happy-sad-7125/train/'
test_dir = '/kaggle/input/happy-sad-7125/test/'
row, col = (48, 48)
classes = 2

def count_exp(path, set_):
    dict_ = {}
    for expression in os.listdir(path):
        dir_ = path + expression
        dict_[expression] = len(os.listdir(dir_))
    df = pd.DataFrame(dict_, index=[set_])
    return df
train_count = count_exp(train_dir, 'train')
test_count = count_exp(test_dir, 'test')
train_datagen = ImageDataGenerator(rescale=1.0 / 255, zoom_range=0.4, horizontal_flip=True)
training_set = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(test_dir, batch_size=15, target_size=(48, 48), shuffle=True, color_mode='grayscale', class_mode='categorical')
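# Illustrative sketch (added; not in the original cell): pulling one augmented
# batch from the iterator with the standard next() protocol. With the settings
# above, shapes should be (16, 48, 48, 1) for images and (16, 2) for labels.
batch_x, batch_y = next(training_set)
print(batch_x.shape, batch_y.shape)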
code
128012372/cell_5
[ "text_plain_output_1.png" ]
import tensorflow as tf
tf.config.list_physical_devices('GPU')
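# Illustrative sketch (added; not in the original cell): a complementary check
# that the installed TensorFlow build itself was compiled with CUDA support.
print(tf.test.is_built_with_cuda())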
code
2017333/cell_21
[ "text_plain_output_1.png" ]
from subprocess import check_output
print(check_output(['ls', '../working']).decode('utf8'))
code
2017333/cell_13
[ "text_html_output_1.png" ]
from sklearn import svm
# X_train and Y_train are created in an earlier cell of this notebook.
clf = svm.SVC()
clf.fit(X_train, Y_train)
clf.score(X_train, Y_train)
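# Illustrative sketch (added; not in the original cell): per-row predictions
# from the fitted classifier, via the standard scikit-learn predict API.
print(clf.predict(X_train)[:10])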
code
2017333/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerIds = test['PassengerId']
PassengerIds
code
2017333/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
train.head()
code
2017333/cell_11
[ "text_html_output_1.png" ]
X_train.head()
code
2017333/cell_19
[ "text_plain_output_1.png" ]
from sklearn import svm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerIds = test['PassengerId']
PassengerIds
# Encode sex as 0/1 and impute missing ages/fares with the column means.
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train['Age'] = train['Age'].fillna(np.mean(train['Age']))
train['Fare'] = train['Fare'].fillna(np.mean(train['Fare']))
clf = svm.SVC()
# X_train/Y_train and X_test/Y_test come from an earlier train/test split cell;
# note the second fit() refits the classifier on the held-out split before the
# final predictions below are made.
clf.fit(X_train, Y_train)
clf.score(X_train, Y_train)
clf.fit(X_test, Y_test)
clf.score(X_test, Y_test)
test['Sex'] = test['Sex'].apply(lambda x: 1 if x == 'male' else 0)
test['Age'] = test['Age'].fillna(np.mean(test['Age']))
test['Fare'] = test['Fare'].fillna(np.mean(test['Fare']))
test = test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
results = clf.predict(test)
submission_df = {'PassengerId': PassengerIds, 'Survived': results}
submission = pd.DataFrame(submission_df)
submission
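# Illustrative sketch (added; not in the original cell): writing the frame in
# the CSV form Kaggle submissions expect. to_csv is the standard pandas API;
# the filename 'submission.csv' is an assumed convention.
submission.to_csv('submission.csv', index=False)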
code
2017333/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
print(check_output(['ls', '../input']).decode('utf8'))
code
2017333/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train = train[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
train.head()
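# Illustrative sketch (added; not in the original cell): class balance of the
# target column, via the standard pandas value_counts API.
print(train['Survived'].value_counts())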
code
2017333/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import svm
# X_train/Y_train and X_test/Y_test are created by a train/test split in an
# earlier cell; the second fit() refits the classifier on the held-out split.
clf = svm.SVC()
clf.fit(X_train, Y_train)
clf.score(X_train, Y_train)
clf.fit(X_test, Y_test)
clf.score(X_test, Y_test)
code
2017333/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('../input/test.csv')
test.head()
code