path: stringlengths, 13 to 17
screenshot_names: sequencelengths, 1 to 873
code: stringlengths, 0 to 40.4k
cell_type: stringclasses, 1 value
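Each record below pairs a notebook cell's path with the names of its rendered outputs and the cell's source code. A minimal sketch of how such a dump could be loaded and inspected with pandas (the file name cells.parquet is an assumption for illustration, not part of this dataset):

import pandas as pd

# Hypothetical export name; adjust to the actual file of this dump.
df = pd.read_parquet('cells.parquet')  # columns: path, screenshot_names, code, cell_type
print(df.dtypes)
row = df.iloc[0]
print(row['path'], row['screenshot_names'])
print(row['code'][:200])  # first 200 characters of the cell's source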
90118084/cell_22
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in columns[2:]:
    data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float')
data.sample(5)
countries = {val: df for val, df in data.groupby('Country')}

def repeated_measures_effect_size(country, col1, col2):
    col1, col2 = (countries[country][col1], countries[country][col2])
    m1, m2 = (np.mean(col1), np.mean(col2))
    s1, s2 = (np.std(col1), np.std(col2))
    r = col1.corr(col2)
    s_z = np.sqrt(s1 ** 2 + s2 ** 2 - 2 * r * s1 * s2)
    s_rm = s_z / np.sqrt(2 * (1 - r))
    return (m1 - m2) / s_rm

effect_sizes_dying = {c: repeated_measures_effect_size(c, 'ProbDyingMale', 'ProbDyingFemale') for c in countries}
effect_sizes_dying = dict(sorted(effect_sizes_dying.items(), key=lambda x: x[1]))
for c in reversed(list(effect_sizes_dying.keys())[-5:]):
    print(c)
code
90118084/cell_27
[ "image_output_1.png" ]
from wordcloud import WordCloud import matplotlib.pyplot as plt import numpy as np import pandas as pd def visualize_word_counts(counts): wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='white', prefer_horizontal=0.95, width=2100, height=700, random_state=0) cloud = wc.generate_from_frequencies(counts) plt.axis('off') data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv') columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale'] values = data.iloc[1:, :].values data = pd.DataFrame(values, columns=columns) for col in columns[2:]: data[col] = data[col].map(lambda x: x.split('[')[0]).astype('float') data.sample(5) countries = {val: df for val, df in data.groupby('Country')} def repeated_measures_effect_size(country, col1, col2): col1, col2 = (countries[country][col1], countries[country][col2]) m1, m2 = (np.mean(col1), np.mean(col2)) s1, s2 = (np.std(col1), np.std(col2)) r = col1.corr(col2) s_z = np.sqrt(s1 ** 2 + s2 ** 2 - 2 * r * s1 * s2) s_rm = s_z / np.sqrt(2 * (1 - r)) return (m1 - m2) / s_rm plt.xticks(np.arange(-10, 41, 5)) country_gender_dying_corr = {} country_gender_suicide_corr = {} for val, df in data.groupby('Country'): corr_gender_dying = df['ProbDyingMale'].corr(df['ProbDyingFemale']) corr_gender_suicide = df['SuicideMale'].corr(df['SuicideFemale']) country_gender_dying_corr[val] = corr_gender_dying country_gender_suicide_corr[val] = corr_gender_suicide plt.figure(figsize=(15, 5)) plt.hist(country_gender_dying_corr.values(), bins=20, alpha=0.4, label='dying') plt.hist(country_gender_suicide_corr.values(), bins=20, alpha=0.4, label='suicide') plt.legend() plt.grid() plt.show()
code
33105771/cell_4
[ "text_plain_output_1.png" ]
import nltk
import os
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split, GroupShuffleSplit
from langdetect import detect
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from keras.wrappers.scikit_learn import KerasClassifier
from textblob import TextBlob
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
code
33105771/cell_6
[ "text_plain_output_1.png" ]
from langdetect import detect
import pandas as pd

npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfoundland and Labrador')
num_rows_removed = len(raw_data) - len(df)
regions = list(set(df.region.values))
num_cats = list(set(df.intervention_category.values))
num_interventions = len(num_cats)
df['start_date'] = pd.to_datetime(df['start_date'], format='%Y-%m-%d')
earliest_start_date = df['start_date'].min()
latest_start_date = df['start_date'].max()
num_days = latest_start_date - earliest_start_date
merged_tweets_csv = '/kaggle/input/npi-twitterverse-april-30/tweets_to_intervention_category.source_urls.tsv'
colnames = ['npi_record_id', 'intervention_category', 'oxford_government_response_category', 'source_url', 'id', 'conversation_id', 'created_at', 'date', 'time', 'timezone', 'user_id', 'username', 'name', 'place', 'tweet', 'mentions', 'urls', 'photos', 'replies_count', 'retweets_count', 'likes_count', 'hashtags', 'cashtags', 'link', 'retweet', 'quote_url', 'video', 'near', 'geo', 'source', 'user_rt_id', 'user_rt', 'retweet_id', 'reply_to', 'retweet_date', 'translate', 'trans_src', 'trans_dest']
tweets_df = pd.read_csv(merged_tweets_csv, encoding='utf-8', error_bad_lines=False, engine='python', names=colnames)
tweets_df = tweets_df.dropna(how='any', subset=['npi_record_id', 'intervention_category', 'tweet'])
data = []
for index, row in tweets_df.iterrows():
    tweet = row['tweet'].strip()
    if tweet != '':
        language = ''
        try:
            language = detect(tweet)
        except:
            language = 'error'
        if language == 'en':
            data.append([row['intervention_category'], tweet])
tweets_df_en = pd.DataFrame(data, columns=['intervention_category', 'tweet'])
print('Number of non-english tweets = {}'.format(len(tweets_df) - len(tweets_df_en)))
print('Number of tweets collected = {}'.format(len(tweets_df_en)))
code
33105771/cell_19
[ "text_plain_output_1.png" ]
# download sentiment map
!wget https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1048/Emoji_Sentiment_Data_v1.0.csv
code
33105771/cell_8
[ "text_html_output_2.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
ex1 = "Here's a wrap of the latest coronavirus news in Canada: 77 cases, one death, an outbreak in a B.C. nursing home and Ottawa asks provinces about their critical supply gaps. https://www.theglobeandmail.com/canada/article-bc-records-canadas-first-coronavirus-death/"
ex2 = 'B.C. records Canada’s first coronavirus death http://dlvr.it/RRZPGL pic.twitter.com/pn8T4yumQJ'
print('Example 1 = {}'.format(ex1))
print('Example 2 = {}'.format(ex2))
code
33105771/cell_3
[ "text_plain_output_1.png" ]
# download necessary packages
!pip install langdetect
!pip install emoji
code
33105771/cell_17
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from textblob import TextBlob
import nltk
import nltk
import os
import pandas as pd
import re
import re
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split, GroupShuffleSplit
from langdetect import detect
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from keras.wrappers.scikit_learn import KerasClassifier
from textblob import TextBlob
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfoundland and Labrador')
num_rows_removed = len(raw_data) - len(df)
regions = list(set(df.region.values))
num_cats = list(set(df.intervention_category.values))
num_interventions = len(num_cats)
df['start_date'] = pd.to_datetime(df['start_date'], format='%Y-%m-%d')
earliest_start_date = df['start_date'].min()
latest_start_date = df['start_date'].max()
num_days = latest_start_date - earliest_start_date
merged_tweets_csv = '/kaggle/input/npi-twitterverse-april-30/tweets_to_intervention_category.source_urls.tsv'
colnames = ['npi_record_id', 'intervention_category', 'oxford_government_response_category', 'source_url', 'id', 'conversation_id', 'created_at', 'date', 'time', 'timezone', 'user_id', 'username', 'name', 'place', 'tweet', 'mentions', 'urls', 'photos', 'replies_count', 'retweets_count', 'likes_count', 'hashtags', 'cashtags', 'link', 'retweet', 'quote_url', 'video', 'near', 'geo', 'source', 'user_rt_id', 'user_rt', 'retweet_id', 'reply_to', 'retweet_date', 'translate', 'trans_src', 'trans_dest']
tweets_df = pd.read_csv(merged_tweets_csv, encoding='utf-8', error_bad_lines=False, engine='python', names=colnames)
tweets_df = tweets_df.dropna(how='any', subset=['npi_record_id', 'intervention_category', 'tweet'])
data = []
for index, row in tweets_df.iterrows():
    tweet = row['tweet'].strip()
    if tweet != '':
        language = ''
        try:
            language = detect(tweet)
        except:
            language = 'error'
        if language == 'en':
            data.append([row['intervention_category'], tweet])
tweets_df_en = pd.DataFrame(data, columns=['intervention_category', 'tweet'])
import re
import nltk
nltk.download('punkt')

def tweet_preprocess(text):
    """Return tokenized text with removed URLs, usernames, hashtags, weird characters, repeated characters, stop words, and numbers"""
    text = text.lower()
    text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))', 'URL', text)
    text = re.sub('@[A-Za-z0-9]+', 'USER', text)
    text = re.sub('#([^\\s]+)', '\\1', text)
    text = re.sub('[^a-zA-Z0-9-*. \n]', ' ', text)
    words = word_tokenize(text)
    ignore = set(stopwords.words('english'))
    more_ignore = {'at', 'and', 'also', 'or', 'http', 'ca', 'www', 'https', 'com', 'twitter', 'html', 'news', 'link', 'positive', 'first', 'First', 'confirmed', 'confirm', 'confirms'}
    ignore.update(more_ignore)
    cleaned_words_tokens = [w for w in words if w not in ignore]
    cleaned_words_tokens = [w for w in cleaned_words_tokens if w.isalpha()]
    return cleaned_words_tokens

def run_sentiment_analysis(tweets_df):
    tweets_df['sentiment'] = 0
    for index, row in tweets_df.iterrows():
        tokens = tweet_preprocess(row['tweet'])
        clean_text = ' '.join(tokens)
        analysis = TextBlob(row['tweet'])
        analysis_after_clean = TextBlob(clean_text)

run_sentiment_analysis(tweets_df_en[:5])
code
33105771/cell_14
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
import nltk
import os
import re
import re
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split, GroupShuffleSplit
from langdetect import detect
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from keras.wrappers.scikit_learn import KerasClassifier
from textblob import TextBlob
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import re
import nltk
nltk.download('punkt')

def tweet_preprocess(text):
    """Return tokenized text with removed URLs, usernames, hashtags, weird characters, repeated characters, stop words, and numbers"""
    text = text.lower()
    text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))', 'URL', text)
    text = re.sub('@[A-Za-z0-9]+', 'USER', text)
    text = re.sub('#([^\\s]+)', '\\1', text)
    text = re.sub('[^a-zA-Z0-9-*. ]', ' ', text)
    words = word_tokenize(text)
    ignore = set(stopwords.words('english'))
    more_ignore = {'at', 'and', 'also', 'or', 'http', 'ca', 'www', 'https', 'com', 'twitter', 'html', 'news', 'link', 'positive', 'first', 'First', 'confirmed', 'confirm', 'confirms'}
    ignore.update(more_ignore)
    cleaned_words_tokens = [w for w in words if w not in ignore]
    cleaned_words_tokens = [w for w in cleaned_words_tokens if w.isalpha()]
    return cleaned_words_tokens
code
33105771/cell_22
[ "text_plain_output_1.png" ]
from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from textblob import TextBlob
import emoji
import nltk
import nltk
import os
import pandas as pd
import plotly.graph_objects as go
import re
import re
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split, GroupShuffleSplit
from langdetect import detect
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from keras.wrappers.scikit_learn import KerasClassifier
from textblob import TextBlob
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfoundland and Labrador')
num_rows_removed = len(raw_data) - len(df)
regions = list(set(df.region.values))
num_cats = list(set(df.intervention_category.values))
num_interventions = len(num_cats)
df['start_date'] = pd.to_datetime(df['start_date'], format='%Y-%m-%d')
earliest_start_date = df['start_date'].min()
latest_start_date = df['start_date'].max()
num_days = latest_start_date - earliest_start_date
merged_tweets_csv = '/kaggle/input/npi-twitterverse-april-30/tweets_to_intervention_category.source_urls.tsv'
colnames = ['npi_record_id', 'intervention_category', 'oxford_government_response_category', 'source_url', 'id', 'conversation_id', 'created_at', 'date', 'time', 'timezone', 'user_id', 'username', 'name', 'place', 'tweet', 'mentions', 'urls', 'photos', 'replies_count', 'retweets_count', 'likes_count', 'hashtags', 'cashtags', 'link', 'retweet', 'quote_url', 'video', 'near', 'geo', 'source', 'user_rt_id', 'user_rt', 'retweet_id', 'reply_to', 'retweet_date', 'translate', 'trans_src', 'trans_dest']
tweets_df = pd.read_csv(merged_tweets_csv, encoding='utf-8', error_bad_lines=False, engine='python', names=colnames)
tweets_df = tweets_df.dropna(how='any', subset=['npi_record_id', 'intervention_category', 'tweet'])
data = []
for index, row in tweets_df.iterrows():
    tweet = row['tweet'].strip()
    if tweet != '':
        language = ''
        try:
            language = detect(tweet)
        except:
            language = 'error'
        if language == 'en':
            data.append([row['intervention_category'], tweet])
tweets_df_en = pd.DataFrame(data, columns=['intervention_category', 'tweet'])
import re
import nltk
nltk.download('punkt')

def tweet_preprocess(text):
    """Return tokenized text with removed URLs, usernames, hashtags, weird characters, repeated characters, stop words, and numbers"""
    text = text.lower()
    text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))', 'URL', text)
    text = re.sub('@[A-Za-z0-9]+', 'USER', text)
    text = re.sub('#([^\\s]+)', '\\1', text)
    text = re.sub('[^a-zA-Z0-9-*. \n]', ' ', text)
    words = word_tokenize(text)
    ignore = set(stopwords.words('english'))
    more_ignore = {'at', 'and', 'also', 'or', 'http', 'ca', 'www', 'https', 'com', 'twitter', 'html', 'news', 'link', 'positive', 'first', 'First', 'confirmed', 'confirm', 'confirms'}
    ignore.update(more_ignore)
    cleaned_words_tokens = [w for w in words if w not in ignore]
    cleaned_words_tokens = [w for w in cleaned_words_tokens if w.isalpha()]
    return cleaned_words_tokens

def run_sentiment_analysis(tweets_df):
    tweets_df['sentiment'] = 0
    for index, row in tweets_df.iterrows():
        tokens = tweet_preprocess(row['tweet'])
        clean_text = ' '.join(tokens)
        analysis = TextBlob(row['tweet'])
        analysis_after_clean = TextBlob(clean_text)

import emoji
emoji_sent_csv = 'Emoji_Sentiment_Data_v1.0.csv'
emoji_data = pd.read_csv(emoji_sent_csv, encoding='ISO-8859-1')

def extract_emojis(str):
    return ''.join((c for c in str if c in emoji.UNICODE_EMOJI))

def calc_emoji_sent(e):
    e_uc = '0x{:X}'.format(ord(e)).lower()
    count_pos = 0
    count_neg = 0
    count_neutral = 0
    sr = emoji_data.loc[emoji_data['Unicode codepoint'] == e_uc.lower()]
    score = -100
    if not sr.empty:
        oc = int(sr['Occurrences'].astype(int))
        num_pos = int(sr['Positive'].astype(int))
        num_neut = int(sr['Neutral'].astype(int))
        num_neg = int(sr['Negative'].astype(int))
        score = 1 * num_pos / oc + -1 * num_neg / oc + 0 * num_neut / oc
    return score

def run_sentiment_analysis_mod(tweets_df):
    tweets_df['sentiment_score'] = 0.0
    tweets_df['sentiment_class'] = ''
    for index, row in tweets_df.iterrows():
        tokens = tweet_preprocess(row['tweet'])
        clean_text = ' '.join(tokens)
        analysis = TextBlob(row['tweet'])
        analysis_after_clean = TextBlob(clean_text)
        c_score = analysis_after_clean.sentiment[0]
        emojis_detected = extract_emojis(row['tweet'])
        avg_emoji_sent_score = 0
        emoji_counts = 0
        if emojis_detected:
            for e in emojis_detected:
                em_sent_score = calc_emoji_sent(e)
                if em_sent_score == -100:
                    continue
                avg_emoji_sent_score += em_sent_score
                emoji_counts += 1
        if emoji_counts > 0:
            avg_emoji_sent_score = avg_emoji_sent_score / emoji_counts
        score = 0.0
        label = 'NEUTRAL'
        if avg_emoji_sent_score > 0.1:
            score = avg_emoji_sent_score
            label = 'POSITIVE'
        elif avg_emoji_sent_score < -0.1:
            score = avg_emoji_sent_score
            label = 'NEGATIVE'
        else:
            score = analysis_after_clean.sentiment[0]
            if score > 0.25:
                label = 'POSITIVE'
            elif score < -0.25:
                label = 'NEGATIVE'
        tweets_df.at[index, 'sentiment_score'] = score
        tweets_df.at[index, 'sentiment_class'] = label
        # print("=============================")
        # print(row["intervention_category"] + "\n")
        # print(row['tweet'])
        # print(clean_text)
        # print("Score (no clean) = {}".format(analysis.sentiment[0]))
        # print("Score (clean) = {}".format(c_score))
        # print("Final Score = {}".format(score))
        # print(label)
    return tweets_df

mod_tweets_df = run_sentiment_analysis_mod(tweets_df)
import plotly.graph_objects as go
import plotly

def split_data_by_class(tweets_df):
    total_tweets_by_cat = tweets_df.groupby('intervention_category')['id'].count().reset_index(name='count').sort_values('intervention_category', ascending=False)
    counts = tweets_df.groupby(['intervention_category', 'sentiment_class'])['id'].count().reset_index(name='count').sort_values('intervention_category', ascending=False)
    counts['proportion'] = 0.0
    for index, row in counts.iterrows():
        total_tweets = int(total_tweets_by_cat.loc[total_tweets_by_cat['intervention_category'] == row['intervention_category']]['count'].astype(int))
        counts.at[index, 'proportion'] = row['count'] / total_tweets
    y = counts['intervention_category'].unique().tolist()
    fill_data = []
    for ic in y:
        for sc in ['POSITIVE', 'NEUTRAL', 'NEGATIVE']:
            subset = counts[(counts.sentiment_class == sc) & (counts.intervention_category == ic)]
            if subset.empty:
                fill_data.append([ic, sc, 0, 0.0])
    fill_data_df = pd.DataFrame(fill_data, columns=['intervention_category', 'sentiment_class', 'count', 'proportion'])
    full_counts = counts.append(fill_data_df).sort_values('intervention_category', ascending=False)
    return (full_counts, y)

def plot(full_counts, y, measure):
    THRESH = 50
    total_tweets_by_cat = tweets_df.groupby('intervention_category')['id'].count().reset_index(name='count').sort_values('intervention_category', ascending=False)
    if measure == 'proportion':
        y = total_tweets_by_cat[total_tweets_by_cat['count'] > THRESH]['intervention_category'].unique().tolist()
        full_counts = full_counts[full_counts.intervention_category.isin(y)]
    pos_counts = full_counts.loc[full_counts['sentiment_class'] == 'POSITIVE']
    neg_counts = full_counts.loc[full_counts['sentiment_class'] == 'NEGATIVE']
    neut_counts = full_counts.loc[full_counts['sentiment_class'] == 'NEUTRAL']
    print('Mean {} for positive class: {}'.format(measure, round(pos_counts[measure].mean(), 2)))
    print('Mean {} for negative class: {}'.format(measure, round(neg_counts[measure].mean(), 2)))
    print('Range {} for positive class: {}-{}'.format(measure, round(pos_counts[measure].min(), 2), round(pos_counts[measure].max(), 2)))
    print('Range {} for negative class: {}-{}'.format(measure, round(neg_counts[measure].min(), 2), round(neg_counts[measure].max(), 2)))
    fig = go.Figure()
    fig.add_trace(go.Bar(y=y, x=pos_counts[measure], name='Positive', orientation='h', marker=dict(color='rgba(90, 191,165, 1.0)', line=dict(color='rgba(255, 255, 255, 1.0)', width=1))))
    fig.add_trace(go.Bar(y=y, x=neg_counts[measure], name='Negative', orientation='h', marker=dict(color='rgba(230, 130, 130, 1.0)', line=dict(color='rgba(255, 255, 255, 1.0)', width=1))))
    fig.add_trace(go.Bar(y=y, x=neut_counts[measure], name='Neutral', orientation='h', marker=dict(color='rgba(190, 203, 200, 1.0)', line=dict(color='rgba(255, 255, 255, 1.0)', width=1))))
    fig.update_layout(width=800, height=1200, barmode='stack', template='plotly_white', bargap=0.5)
    fig.show()

full_counts, y = split_data_by_class(mod_tweets_df)
plot(full_counts, y, 'proportion')
plot(full_counts, y, 'count')
code
33105771/cell_10
[ "text_plain_output_1.png" ]
from textblob import TextBlob

ex1 = "Here's a wrap of the latest coronavirus news in Canada: 77 cases, one death, an outbreak in a B.C. nursing home and Ottawa asks provinces about their critical supply gaps. https://www.theglobeandmail.com/canada/article-bc-records-canadas-first-coronavirus-death/"
ex2 = 'B.C. records Canada’s first coronavirus death http://dlvr.it/RRZPGL pic.twitter.com/pn8T4yumQJ'
ex1_tb = TextBlob(ex1)
ex1_ss = ex1_tb.sentiment[0]
print('Example 1 has score={}'.format(ex1_ss))
ex2_tb = TextBlob(ex2)
ex2_ss = ex2_tb.sentiment[0]
print('Example 2 has score={}'.format(ex2_ss))
code
33105771/cell_12
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from textblob import TextBlob

ex = 'first coronavirus death'
ex_tb = TextBlob(ex)
ex_ss = ex_tb.sentiment[0]
print('{} with score={}'.format(ex, ex_ss))
ex = 'coronavirus death'
ex_tb = TextBlob(ex)
ex_ss = ex_tb.sentiment[0]
print('{} with score={}'.format(ex, ex_ss))
code
33105771/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfoundland and Labrador')
num_rows_removed = len(raw_data) - len(df)
print('Number of rows removed: {}'.format(num_rows_removed))
regions = list(set(df.region.values))
print('Number of unique regions: {}'.format(len(regions)))
num_cats = list(set(df.intervention_category.values))
num_interventions = len(num_cats)
print('Number of unique intervention categories: {}'.format(len(num_cats)))
df['start_date'] = pd.to_datetime(df['start_date'], format='%Y-%m-%d')
earliest_start_date = df['start_date'].min()
latest_start_date = df['start_date'].max()
num_days = latest_start_date - earliest_start_date
print('Analyzing from {} to {} ({} days)'.format(earliest_start_date.date(), latest_start_date.date(), num_days))
print('DONE READING DATA')
code
318372/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
code
318372/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
comment[(comment.gid == 'EPH') & (comment.rid == '')]['name'].value_counts().head(10)
code
318372/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
comment[(comment.gid == 'EPH') & (comment.rid != '')]['rname'].value_counts().head(10)
code
318372/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.head(4)
code
73081016/cell_13
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() def lemmatize_all_types(word): word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') return word def clean(text): text = re.sub('http.*?([ ]|\\|\\|\\||$)', '', text).lower() text = re.sub('(:|;).', '', text) text = re.sub('[' + re.escape(str_punc) + ']', '', text) text = re.sub('(\\[|\\()*\\d+(\\]|\\))*', '', text) text = re.sub('[β€™β€˜β€œ\\.”…–]', '', text) text = list(map(lemmatize_all_types, text.split())) text = [word for word in text if word not in engstopwords] text = ' '.join(text) return text dataset.posts = pd.Series(dataset.posts.apply(clean)) dataset.index = dataset['type'] posts_vectorized = cv.fit_transform(dataset.posts) dtm = pd.DataFrame(data=posts_vectorized.toarray(), columns=cv.get_feature_names()) dtm.index = dataset['type'] dtm.iloc[:, -20000:-20016:-1] dtmT = dtm.transpose() dtmT.iloc[-20000:-20016:-1, :] cloud = [] for personality in dtmT.columns: topWords = list(dtmT[personality].sort_values(ascending=False).head(n_common_words).index) cloud.append({personality: topWords}) for index, personality in enumerate(dtmT.columns): wc.generate(dataset.posts[personality]) plt.imshow(wc, interpolation='bilinear') plt.title(personality) plt.savefig(personality + '.png')
code
73081016/cell_4
[ "image_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() dataset.head(16)
code
73081016/cell_8
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() def lemmatize_all_types(word): word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') return word def clean(text): text = re.sub('http.*?([ ]|\\|\\|\\||$)', '', text).lower() text = re.sub('(:|;).', '', text) text = re.sub('[' + re.escape(str_punc) + ']', '', text) text = re.sub('(\\[|\\()*\\d+(\\]|\\))*', '', text) text = re.sub('[β€™β€˜β€œ\\.”…–]', '', text) text = list(map(lemmatize_all_types, text.split())) text = [word for word in text if word not in engstopwords] text = ' '.join(text) return text dataset.posts = pd.Series(dataset.posts.apply(clean)) dataset.index = dataset['type'] posts_vectorized = cv.fit_transform(dataset.posts) dtm = pd.DataFrame(data=posts_vectorized.toarray(), columns=cv.get_feature_names()) dtm.index = dataset['type'] dtm.iloc[:, -20000:-20016:-1]
code
73081016/cell_15
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import numpy as np import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() def lemmatize_all_types(word): word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') return word def clean(text): text = re.sub('http.*?([ ]|\\|\\|\\||$)', '', text).lower() text = re.sub('(:|;).', '', text) text = re.sub('[' + re.escape(str_punc) + ']', '', text) text = re.sub('(\\[|\\()*\\d+(\\]|\\))*', '', text) text = re.sub('[β€™β€˜β€œ\\.”…–]', '', text) text = list(map(lemmatize_all_types, text.split())) text = [word for word in text if word not in engstopwords] text = ' '.join(text) return text dataset.posts = pd.Series(dataset.posts.apply(clean)) dataset.index = dataset['type'] posts_vectorized = cv.fit_transform(dataset.posts) dtm = pd.DataFrame(data=posts_vectorized.toarray(), columns=cv.get_feature_names()) dtm.index = dataset['type'] dtm.iloc[:, -20000:-20016:-1] dtmT = dtm.transpose() dtmT.iloc[-20000:-20016:-1, :] cloud = [] for personality in dtmT.columns: topWords = list(dtmT[personality].sort_values(ascending=False).head(n_common_words).index) cloud.append({personality: topWords}) for index, personality in enumerate(dtmT.columns): wc.generate(dataset.posts[personality]) n_of_types = 15 shared_words = [] all_words = [] for index, personality in enumerate(dtmT.columns): all_words.append(cloud[index][personality]) unique_words = np.unique(np.array(all_words)) for word in unique_words: counter = 0 for index, personality in enumerate(dtmT.columns): if word in cloud[index][personality]: counter += 1 if counter >= n_of_types: shared_words.append(word) shared_words
code
73081016/cell_10
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() def lemmatize_all_types(word): word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') return word def clean(text): text = re.sub('http.*?([ ]|\\|\\|\\||$)', '', text).lower() text = re.sub('(:|;).', '', text) text = re.sub('[' + re.escape(str_punc) + ']', '', text) text = re.sub('(\\[|\\()*\\d+(\\]|\\))*', '', text) text = re.sub('[β€™β€˜β€œ\\.”…–]', '', text) text = list(map(lemmatize_all_types, text.split())) text = [word for word in text if word not in engstopwords] text = ' '.join(text) return text dataset.posts = pd.Series(dataset.posts.apply(clean)) dataset.index = dataset['type'] posts_vectorized = cv.fit_transform(dataset.posts) dtm = pd.DataFrame(data=posts_vectorized.toarray(), columns=cv.get_feature_names()) dtm.index = dataset['type'] dtm.iloc[:, -20000:-20016:-1] dtmT = dtm.transpose() dtmT.iloc[-20000:-20016:-1, :]
code
73081016/cell_12
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud import json import matplotlib.pyplot as plt import pandas as pd import re import string n_common_words = 50 wnl = WordNetLemmatizer() engstopwords = stopwords.words('english') engstopwordsV2 = re.sub('[' + re.escape(string.punctuation) + ']', '', ' '.join(engstopwords)).split() with open('../input/mbtiadditionalwords/additional-stopwords.json') as file: additional_stopwords = json.load(file) engstopwords.extend(additional_stopwords) engstopwords = set(engstopwords).union(set(engstopwordsV2)) str_punc = string.punctuation filename = '../input/mbti-type/mbti_1.csv' cv = CountVectorizer(stop_words='english') plt.rcParams['figure.dpi'] = 300 wc = WordCloud(stopwords=engstopwords, background_color='white', colormap='Dark2', max_font_size=150, random_state=42, width=800, height=400) dataset = pd.read_csv(filename) dataset = dataset.groupby(['type'])['posts'].apply(lambda x: '. '.join(x)).reset_index() def lemmatize_all_types(word): word = wnl.lemmatize(word, 'a') word = wnl.lemmatize(word, 'v') word = wnl.lemmatize(word, 'n') return word def clean(text): text = re.sub('http.*?([ ]|\\|\\|\\||$)', '', text).lower() text = re.sub('(:|;).', '', text) text = re.sub('[' + re.escape(str_punc) + ']', '', text) text = re.sub('(\\[|\\()*\\d+(\\]|\\))*', '', text) text = re.sub('[β€™β€˜β€œ\\.”…–]', '', text) text = list(map(lemmatize_all_types, text.split())) text = [word for word in text if word not in engstopwords] text = ' '.join(text) return text dataset.posts = pd.Series(dataset.posts.apply(clean)) dataset.index = dataset['type'] posts_vectorized = cv.fit_transform(dataset.posts) dtm = pd.DataFrame(data=posts_vectorized.toarray(), columns=cv.get_feature_names()) dtm.index = dataset['type'] dtm.iloc[:, -20000:-20016:-1] dtmT = dtm.transpose() dtmT.iloc[-20000:-20016:-1, :] cloud = [] for personality in dtmT.columns: topWords = list(dtmT[personality].sort_values(ascending=False).head(n_common_words).index) cloud.append({personality: topWords}) cloud[11]
code
34144563/cell_4
[ "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import numpy as np
from numpy import linalg as LA  # provides LA.inv used for the coarse-space projection below
import pandas as pd
import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_nonl.npy'
ffine_all = 10000 * np.load(path)
ffine = ffine_all[:train_samples]
nb_samples = np.shape(ffine)[0]
nb_times = np.shape(ffine)[1]
fcoarse_reshape = np.reshape(ffine, (nb_samples, nb_times, a - 1, b - 1))
fcoarse_reshape_torch_before = torch.tensor(fcoarse_reshape, dtype=torch.float32, device=device)
ffine_test = ffine_all[train_samples:end_samples]
nb_samples_test = np.shape(ffine_test)[0]
nb_times_test = np.shape(ffine_test)[1]
fcoarse_reshape_test = np.reshape(ffine_test, (nb_samples_test, nb_times_test, a - 1, b - 1))
fcoarse_reshape_test_torch_before = torch.tensor(fcoarse_reshape_test, dtype=torch.float32, device=device)
path = '../input/cem100/cembasis_10.npy'
_R_ = np.load(path)
Rt = np.transpose(_R_)
print('Rt shape', np.shape(Rt))
RtFh = []
for jx in tqdm(range(nb_samples)):
    RtFh_t = []
    for ix in range(nb_times):
        RtFh_t.append(np.matmul(Rt, ffine[jx, ix, :]))
    RtFh.append(RtFh_t)
print('np.shape(RtFh): R^t*F_h', np.shape(RtFh))
RtFh = np.array(RtFh)
RtFh0 = RtFh[:, :, 0::3]
Rtfh0_torch = torch.tensor(RtFh0, dtype=torch.float32, device=device)
print('Rtfh0_torch', Rtfh0_torch.size())
nb_model_phy0 = Rtfh0_torch.size()[-1]
print('nb_model_phy0', nb_model_phy0)
RtFh1 = RtFh[:, :, 1::3]
Rtfh1_torch = torch.tensor(RtFh1, dtype=torch.float32, device=device)
print('Rtf1torch', Rtfh1_torch.size())
nb_model_phy1 = Rtfh1_torch.size()[-1]
print('nb_model_phy1', nb_model_phy1)
RtFh2 = RtFh[:, :, 2::3]
Rtfh2_torch = torch.tensor(RtFh2, dtype=torch.float32, device=device)
print('Rtfh2_torch', Rtfh2_torch.size())
nb_model_phy2 = Rtfh2_torch.size()[-1]
print('nb_model_phy2', nb_model_phy2)
Rtfh_torch = torch.tensor(RtFh, dtype=torch.float32, device=device)
print('Rtfh_torch', Rtfh_torch.size())
nb_model_phy = Rtfh_torch.size()[-1]
print('nb_model_phy', nb_model_phy)
RtFh_test = []
for jx in tqdm(range(nb_samples_test)):
    RtFh_t = []
    for ix in range(nb_times_test):
        RtFh_t.append(np.matmul(Rt, ffine_test[jx, ix, :]))
    RtFh_test.append(RtFh_t)
print('np.shape(RtFh_test): R^t*F_h', np.shape(RtFh_test))
RtFh_test = np.array(RtFh_test)
RtFh_test0 = RtFh_test[:, :, 0::3]
Rtfh0_torch_test = torch.tensor(RtFh_test0, dtype=torch.float32, device=device)
print('Rtfh0_torch_test', Rtfh0_torch_test.size())
nb_model_phy0_test = Rtfh0_torch_test.size()[-1]
print('nb_model_phy0_test', nb_model_phy0_test)
RtFh_test1 = RtFh_test[:, :, 1::3]
Rtfh1_torch_test = torch.tensor(RtFh_test1, dtype=torch.float32, device=device)
print('Rtfh1_torch_test', Rtfh1_torch_test.size())
nb_model_phy1_test = Rtfh1_torch_test.size()[-1]
print('nb_model_phy1_test', nb_model_phy1_test)
invRtR = LA.inv(np.matmul(Rt, _R_))
path = '../input/2stage-simon/sol_set_nonl.txt'
u_f_all = 10000 * pd.read_csv(path, sep=' ', header=None).values
print('u_f_all', np.shape(u_f_all))
u_f = u_f_all[:train_samples]
print('u_f', np.shape(u_f))
u_H = []
for ix in range(nb_samples):
    u_H.append(np.matmul(invRtR, np.matmul(Rt, u_f[ix])))
u_H = np.array(u_H)
print('u_H', np.shape(u_H))
nb_rdm = np.shape(u_H)[1]
print('nb_rdm', nb_rdm)
u_f_test = u_f_all[train_samples:end_samples]
print('u_f_test', np.shape(u_f_test))
u_H_test = []
for ix in range(nb_samples_test):
    u_H_test.append(np.matmul(invRtR, np.matmul(Rt, u_f_test[ix])))
u_H_test = np.array(u_H_test)
print('u_H_test', np.shape(u_H_test))
nb_rdm_test = np.shape(u_H_test)[1]
print('nb_rdm_test', nb_rdm_test)
code
34144563/cell_6
[ "text_plain_output_1.png" ]
from torch import nn, optim
from torch.nn.functional import softmax
from tqdm import tqdm
import numpy as np
from numpy import linalg as LA  # provides LA.inv used for the coarse-space projection below
import pandas as pd
import torch

nb_takes = 15
nb_reduced = int(300 / nb_takes)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_nonl.npy'
ffine_all = 10000 * np.load(path)
ffine = ffine_all[:train_samples]
nb_samples = np.shape(ffine)[0]
nb_times = np.shape(ffine)[1]
fcoarse_reshape = np.reshape(ffine, (nb_samples, nb_times, a - 1, b - 1))
fcoarse_reshape_torch_before = torch.tensor(fcoarse_reshape, dtype=torch.float32, device=device)
ffine_test = ffine_all[train_samples:end_samples]
nb_samples_test = np.shape(ffine_test)[0]
nb_times_test = np.shape(ffine_test)[1]
fcoarse_reshape_test = np.reshape(ffine_test, (nb_samples_test, nb_times_test, a - 1, b - 1))
fcoarse_reshape_test_torch_before = torch.tensor(fcoarse_reshape_test, dtype=torch.float32, device=device)
path = '../input/cem100/cembasis_10.npy'
_R_ = np.load(path)
Rt = np.transpose(_R_)
RtFh = []
for jx in tqdm(range(nb_samples)):
    RtFh_t = []
    for ix in range(nb_times):
        RtFh_t.append(np.matmul(Rt, ffine[jx, ix, :]))
    RtFh.append(RtFh_t)
RtFh = np.array(RtFh)
RtFh0 = RtFh[:, :, 0::3]
Rtfh0_torch = torch.tensor(RtFh0, dtype=torch.float32, device=device)
nb_model_phy0 = Rtfh0_torch.size()[-1]
RtFh1 = RtFh[:, :, 1::3]
Rtfh1_torch = torch.tensor(RtFh1, dtype=torch.float32, device=device)
nb_model_phy1 = Rtfh1_torch.size()[-1]
RtFh2 = RtFh[:, :, 2::3]
Rtfh2_torch = torch.tensor(RtFh2, dtype=torch.float32, device=device)
nb_model_phy2 = Rtfh2_torch.size()[-1]
Rtfh_torch = torch.tensor(RtFh, dtype=torch.float32, device=device)
nb_model_phy = Rtfh_torch.size()[-1]
RtFh_test = []
for jx in tqdm(range(nb_samples_test)):
    RtFh_t = []
    for ix in range(nb_times_test):
        RtFh_t.append(np.matmul(Rt, ffine_test[jx, ix, :]))
    RtFh_test.append(RtFh_t)
RtFh_test = np.array(RtFh_test)
RtFh_test0 = RtFh_test[:, :, 0::3]
Rtfh0_torch_test = torch.tensor(RtFh_test0, dtype=torch.float32, device=device)
nb_model_phy0_test = Rtfh0_torch_test.size()[-1]
RtFh_test1 = RtFh_test[:, :, 1::3]
Rtfh1_torch_test = torch.tensor(RtFh_test1, dtype=torch.float32, device=device)
nb_model_phy1_test = Rtfh1_torch_test.size()[-1]
invRtR = LA.inv(np.matmul(Rt, _R_))
path = '../input/2stage-simon/sol_set_nonl.txt'
u_f_all = 10000 * pd.read_csv(path, sep=' ', header=None).values
u_f = u_f_all[:train_samples]
u_H = []
for ix in range(nb_samples):
    u_H.append(np.matmul(invRtR, np.matmul(Rt, u_f[ix])))
u_H = np.array(u_H)
nb_rdm = np.shape(u_H)[1]
u_f_test = u_f_all[train_samples:end_samples]
u_H_test = []
for ix in range(nb_samples_test):
    u_H_test.append(np.matmul(invRtR, np.matmul(Rt, u_f_test[ix])))
u_H_test = np.array(u_H_test)
nb_rdm_test = np.shape(u_H_test)[1]

class OneHeadAttention(nn.Module):

    def __init__(self, d_model, d_reduced, bo):
        super().__init__()
        self.v_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()
        self.q_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()
        self.k_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()

    def forward(self, vv):
        v = self.v_linear(vv)
        k = self.k_linear(vv)
        q = self.q_linear(vv)
        qkt = torch.matmul(q, torch.transpose(k, 1, 2))
        sm_qkt = softmax(qkt, dim=-1)
        out = torch.matmul(sm_qkt, v)
        return out

model = OneHeadAttention(nb_model_phy, nb_reduced_phy, True)

def init_weights(m):
    if type(m) == nn.Linear:
        m.bias.data.uniform_(-1 / 100000, 1 / 100000)

model.apply(init_weights)
out = model(Rtfh_torch)
np.shape(out)
nb_heads = 6

class MultiHeads(nn.Module):

    def __init__(self, d_model, d_reduced, d_head, bo):
        super().__init__()
        self.head1 = OneHeadAttention(d_model, d_reduced, bo)
        self.head2 = OneHeadAttention(d_model, d_reduced, bo)
        self.head3 = OneHeadAttention(d_model, d_reduced, bo)
        self.head4 = OneHeadAttention(d_model, d_reduced, bo)
        self.head5 = OneHeadAttention(d_model, d_reduced, bo)
        self.head6 = OneHeadAttention(d_model, d_reduced, bo)
        self.linear = nn.Linear(d_reduced * d_head, d_reduced, bias=bias_bo).cuda()

    def forward(self, v):
        out1 = self.head1(v)
        out2 = self.head2(v)
        out3 = self.head3(v)
        out4 = self.head4(v)
        out5 = self.head5(v)
        out6 = self.head6(v)
        concat_out = torch.cat((out1, out2, out3, out4, out5, out6), dim=-1)
        out = self.linear(concat_out)
        return out

def init_weights(m):
    if type(m) == nn.Linear:
        m.bias.data.uniform_(-1 / 100000, 1 / 100000)

model = MultiHeads(nb_model_phy, nb_reduced_phy, nb_heads, True)
model.apply(init_weights)
out = model(Rtfh_torch)
print(out.size())
code
34144563/cell_2
[ "text_plain_output_1.png" ]
nb_takes = 15
nb_reduced = int(300 / nb_takes)
print(nb_takes, nb_reduced)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced
code
34144563/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_nonl.npy'
ffine_all = 10000 * np.load(path)
print('ffine_all', np.shape(ffine_all))
ffine = ffine_all[:train_samples]
print('ffine', np.shape(ffine))
nb_samples = np.shape(ffine)[0]
nb_times = np.shape(ffine)[1]
fcoarse_reshape = np.reshape(ffine, (nb_samples, nb_times, a - 1, b - 1))
print('fcoarse_reshape', np.shape(fcoarse_reshape))
fcoarse_reshape_torch_before = torch.tensor(fcoarse_reshape, dtype=torch.float32, device=device)
print('fcoarse_reshape_torch_before', np.shape(fcoarse_reshape_torch_before))
ffine_test = ffine_all[train_samples:end_samples]
print('ffine_test', np.shape(ffine_test))
nb_samples_test = np.shape(ffine_test)[0]
nb_times_test = np.shape(ffine_test)[1]
fcoarse_reshape_test = np.reshape(ffine_test, (nb_samples_test, nb_times_test, a - 1, b - 1))
fcoarse_reshape_test_torch_before = torch.tensor(fcoarse_reshape_test, dtype=torch.float32, device=device)
print('fcoarse_reshape_test_torch_before', np.shape(fcoarse_reshape_test_torch_before))
code
34144563/cell_5
[ "text_plain_output_1.png" ]
from torch import nn, optim
from torch.nn.functional import softmax
from tqdm import tqdm
import numpy as np
from numpy import linalg as LA  # provides LA.inv used for the coarse-space projection below
import pandas as pd
import torch

nb_takes = 15
nb_reduced = int(300 / nb_takes)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_nonl.npy'
ffine_all = 10000 * np.load(path)
ffine = ffine_all[:train_samples]
nb_samples = np.shape(ffine)[0]
nb_times = np.shape(ffine)[1]
fcoarse_reshape = np.reshape(ffine, (nb_samples, nb_times, a - 1, b - 1))
fcoarse_reshape_torch_before = torch.tensor(fcoarse_reshape, dtype=torch.float32, device=device)
ffine_test = ffine_all[train_samples:end_samples]
nb_samples_test = np.shape(ffine_test)[0]
nb_times_test = np.shape(ffine_test)[1]
fcoarse_reshape_test = np.reshape(ffine_test, (nb_samples_test, nb_times_test, a - 1, b - 1))
fcoarse_reshape_test_torch_before = torch.tensor(fcoarse_reshape_test, dtype=torch.float32, device=device)
path = '../input/cem100/cembasis_10.npy'
_R_ = np.load(path)
Rt = np.transpose(_R_)
RtFh = []
for jx in tqdm(range(nb_samples)):
    RtFh_t = []
    for ix in range(nb_times):
        RtFh_t.append(np.matmul(Rt, ffine[jx, ix, :]))
    RtFh.append(RtFh_t)
RtFh = np.array(RtFh)
RtFh0 = RtFh[:, :, 0::3]
Rtfh0_torch = torch.tensor(RtFh0, dtype=torch.float32, device=device)
nb_model_phy0 = Rtfh0_torch.size()[-1]
RtFh1 = RtFh[:, :, 1::3]
Rtfh1_torch = torch.tensor(RtFh1, dtype=torch.float32, device=device)
nb_model_phy1 = Rtfh1_torch.size()[-1]
RtFh2 = RtFh[:, :, 2::3]
Rtfh2_torch = torch.tensor(RtFh2, dtype=torch.float32, device=device)
nb_model_phy2 = Rtfh2_torch.size()[-1]
Rtfh_torch = torch.tensor(RtFh, dtype=torch.float32, device=device)
nb_model_phy = Rtfh_torch.size()[-1]
RtFh_test = []
for jx in tqdm(range(nb_samples_test)):
    RtFh_t = []
    for ix in range(nb_times_test):
        RtFh_t.append(np.matmul(Rt, ffine_test[jx, ix, :]))
    RtFh_test.append(RtFh_t)
RtFh_test = np.array(RtFh_test)
RtFh_test0 = RtFh_test[:, :, 0::3]
Rtfh0_torch_test = torch.tensor(RtFh_test0, dtype=torch.float32, device=device)
nb_model_phy0_test = Rtfh0_torch_test.size()[-1]
RtFh_test1 = RtFh_test[:, :, 1::3]
Rtfh1_torch_test = torch.tensor(RtFh_test1, dtype=torch.float32, device=device)
nb_model_phy1_test = Rtfh1_torch_test.size()[-1]
invRtR = LA.inv(np.matmul(Rt, _R_))
path = '../input/2stage-simon/sol_set_nonl.txt'
u_f_all = 10000 * pd.read_csv(path, sep=' ', header=None).values
u_f = u_f_all[:train_samples]
u_H = []
for ix in range(nb_samples):
    u_H.append(np.matmul(invRtR, np.matmul(Rt, u_f[ix])))
u_H = np.array(u_H)
nb_rdm = np.shape(u_H)[1]
u_f_test = u_f_all[train_samples:end_samples]
u_H_test = []
for ix in range(nb_samples_test):
    u_H_test.append(np.matmul(invRtR, np.matmul(Rt, u_f_test[ix])))
u_H_test = np.array(u_H_test)
nb_rdm_test = np.shape(u_H_test)[1]

class OneHeadAttention(nn.Module):

    def __init__(self, d_model, d_reduced, bo):
        super().__init__()
        self.v_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()
        self.q_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()
        self.k_linear = nn.Linear(d_model, d_reduced, bias=bias_bo).cuda()

    def forward(self, vv):
        v = self.v_linear(vv)
        k = self.k_linear(vv)
        q = self.q_linear(vv)
        qkt = torch.matmul(q, torch.transpose(k, 1, 2))
        sm_qkt = softmax(qkt, dim=-1)
        out = torch.matmul(sm_qkt, v)
        return out

model = OneHeadAttention(nb_model_phy, nb_reduced_phy, True)

def init_weights(m):
    if type(m) == nn.Linear:
        m.bias.data.uniform_(-1 / 100000, 1 / 100000)

model.apply(init_weights)
out = model(Rtfh_torch)
np.shape(out)
code
74059064/cell_13
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator import tensorflow as tf ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/' TRAIN_DIR = ROOT + 'train' TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg' VAL_DIR = ROOT + 'test' NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg' PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg' RESIZE_WIDTH = 256 RESIZE_HEIGHT = 256 NORMAL_TITLE = 'Normal' PNEUMONIA_TITLE = 'Pneumonia' BATCH_SIZE = 32 TARGET_WIDTH = 64 TARGET_HEIGHT = 64 CLASS_MODE = 'binary' RESCALE = 255 SHEAR_RANGE = 0.25 VERTICAL_FLIP = False HORIZONTAL_FLIP = True ZOOM_RANGE = 0.25 VALIDATION_SPLIT = 0.15 EPOCH = 20 SUPTITLE = 'Train vs Validation' LOSS_LABEL = 'Loss' ACCURACY_LABEL = 'Accuracy' AUC_LABEL = 'ROC-AUC' RECALL_LABEL = 'Recall' TRAIN_LOSS_LABEL = 'Training Loss' VALIDATION_LOSS_LABEL = 'Validation Loss' TRAIN_ACCURACY_LABEL = 'Training Accuracy' VALIDATION_ACCURACY_LABEL = 'Validation Accuracy' TRAIN_AUC_LABEL = 'Training AUC' VALIDATION_AUC_LABEL = 'Validation AUC' TRAIN_RECALL_LABEL = 'Training Recall' VALIDATION_RECALL_LABEL = 'Validation Recall' UPPER_RIGHT = 'upper right' LOWER_RIGHT = 'lower right' COLOR_CHANNEL = 3 CONV1_FILTERS = 48 CONV1_KERNEL = 3 CONV1_ACTIVATION = 'relu' POOL1_SIZE = 3 POOL1_STRIDE = 1 CONV2_FILTERS = 32 CONV2_KERNEL = 3 CONV2_ACTIVATION = 'relu' DROPOUT = 0.3 POOL2_SIZE = 2 POOL2_STRIDE = 1 CONV3_FILTERS = 16 CONV3_KERNEL = 3 CONV3_ACTIVATION = 'relu' FC_ACTIVATION = 'relu' FC_UNITS = 128 OUTPUT_ACTIVATION = 'sigmoid' OUTPUT_UNITS = 1 OPTIMIZER = 'adam' LOSS_CLASS = 'binary_crossentropy' ACCURACY_METRICS = 'accuracy' AUC_METRICS = tf.keras.metrics.AUC() RECALL_METRICS = tf.keras.metrics.Recall() train_gen = ImageDataGenerator(rescale=1.0 / RESCALE, shear_range=SHEAR_RANGE, vertical_flip=VERTICAL_FLIP, horizontal_flip=HORIZONTAL_FLIP, zoom_range=ZOOM_RANGE, validation_split=VALIDATION_SPLIT) train_set = train_gen.flow_from_directory(TRAIN_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
code
74059064/cell_25
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf

ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
normal_img = cv2.imread(NORMAL_DIR)
pneumonia_img = cv2.imread(PNEUMONIA_DIR)
normal_img = cv2.resize(normal_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
pneumonia_img = cv2.resize(pneumonia_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
train_gen = ImageDataGenerator(rescale=1.0 / RESCALE, shear_range=SHEAR_RANGE, vertical_flip=VERTICAL_FLIP, horizontal_flip=HORIZONTAL_FLIP, zoom_range=ZOOM_RANGE, validation_split=VALIDATION_SPLIT)
train_set = train_gen.flow_from_directory(TRAIN_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
val_gen = ImageDataGenerator(rescale=1.0 / RESCALE)
val_set = val_gen.flow_from_directory(VAL_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=CONV1_FILTERS, kernel_size=CONV1_KERNEL, input_shape=[TARGET_WIDTH, TARGET_HEIGHT, COLOR_CHANNEL]))
model.add(tf.keras.layers.Activation(CONV1_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL1_SIZE, strides=POOL1_STRIDE))
model.add(tf.keras.layers.Conv2D(filters=CONV2_FILTERS, kernel_size=CONV2_KERNEL))
model.add(tf.keras.layers.Activation(CONV2_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL2_SIZE, strides=POOL2_STRIDE))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Conv2D(filters=CONV3_FILTERS, kernel_size=CONV3_KERNEL))
model.add(tf.keras.layers.Activation(CONV3_ACTIVATION))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=FC_UNITS, activation=FC_ACTIVATION))
model.add(tf.keras.layers.Dense(units=OUTPUT_UNITS, activation=OUTPUT_ACTIVATION))
model.compile(optimizer=OPTIMIZER, loss=LOSS_CLASS, metrics=[AUC_METRICS, ACCURACY_METRICS, RECALL_METRICS])
model.summary()
hist = model.fit(x=train_set, validation_data=val_set, epochs=EPOCH)
key_list = []
for key in hist.history.keys():
    key_list.append(key)
sample_img = cv2.imread(TEST_IMAGE_DIR)
sample_img = cv2.resize(sample_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
plt.imshow(sample_img)
plt.title(PNEUMONIA_TITLE)
plt.show()
code
74059064/cell_6
[ "text_plain_output_1.png" ]
import cv2
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
normal_img = cv2.imread(NORMAL_DIR)
pneumonia_img = cv2.imread(PNEUMONIA_DIR)
print('Normal: {} Pneumonia: {}'.format(normal_img.shape, pneumonia_img.shape))
code
74059064/cell_19
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
train_gen = ImageDataGenerator(rescale=1.0 / RESCALE, shear_range=SHEAR_RANGE, vertical_flip=VERTICAL_FLIP, horizontal_flip=HORIZONTAL_FLIP, zoom_range=ZOOM_RANGE, validation_split=VALIDATION_SPLIT)
train_set = train_gen.flow_from_directory(TRAIN_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
val_gen = ImageDataGenerator(rescale=1.0 / RESCALE)
val_set = val_gen.flow_from_directory(VAL_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=CONV1_FILTERS, kernel_size=CONV1_KERNEL, input_shape=[TARGET_WIDTH, TARGET_HEIGHT, COLOR_CHANNEL]))
model.add(tf.keras.layers.Activation(CONV1_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL1_SIZE, strides=POOL1_STRIDE))
model.add(tf.keras.layers.Conv2D(filters=CONV2_FILTERS, kernel_size=CONV2_KERNEL))
model.add(tf.keras.layers.Activation(CONV2_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL2_SIZE, strides=POOL2_STRIDE))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Conv2D(filters=CONV3_FILTERS, kernel_size=CONV3_KERNEL))
model.add(tf.keras.layers.Activation(CONV3_ACTIVATION))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=FC_UNITS, activation=FC_ACTIVATION))
model.add(tf.keras.layers.Dense(units=OUTPUT_UNITS, activation=OUTPUT_ACTIVATION))
model.compile(optimizer=OPTIMIZER, loss=LOSS_CLASS, metrics=[AUC_METRICS, ACCURACY_METRICS, RECALL_METRICS])
model.summary()
hist = model.fit(x=train_set, validation_data=val_set, epochs=EPOCH)
code
74059064/cell_18
[ "text_plain_output_1.png" ]
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=CONV1_FILTERS, kernel_size=CONV1_KERNEL, input_shape=[TARGET_WIDTH, TARGET_HEIGHT, COLOR_CHANNEL]))
model.add(tf.keras.layers.Activation(CONV1_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL1_SIZE, strides=POOL1_STRIDE))
model.add(tf.keras.layers.Conv2D(filters=CONV2_FILTERS, kernel_size=CONV2_KERNEL))
model.add(tf.keras.layers.Activation(CONV2_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL2_SIZE, strides=POOL2_STRIDE))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Conv2D(filters=CONV3_FILTERS, kernel_size=CONV3_KERNEL))
model.add(tf.keras.layers.Activation(CONV3_ACTIVATION))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=FC_UNITS, activation=FC_ACTIVATION))
model.add(tf.keras.layers.Dense(units=OUTPUT_UNITS, activation=OUTPUT_ACTIVATION))
model.compile(optimizer=OPTIMIZER, loss=LOSS_CLASS, metrics=[AUC_METRICS, ACCURACY_METRICS, RECALL_METRICS])
model.summary()
code
74059064/cell_28
[ "image_output_1.png" ]
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np # linear algebra
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
train_gen = ImageDataGenerator(rescale=1.0 / RESCALE, shear_range=SHEAR_RANGE, vertical_flip=VERTICAL_FLIP, horizontal_flip=HORIZONTAL_FLIP, zoom_range=ZOOM_RANGE, validation_split=VALIDATION_SPLIT)
train_set = train_gen.flow_from_directory(TRAIN_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
val_gen = ImageDataGenerator(rescale=1.0 / RESCALE)
val_set = val_gen.flow_from_directory(VAL_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=CONV1_FILTERS, kernel_size=CONV1_KERNEL, input_shape=[TARGET_WIDTH, TARGET_HEIGHT, COLOR_CHANNEL]))
model.add(tf.keras.layers.Activation(CONV1_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL1_SIZE, strides=POOL1_STRIDE))
model.add(tf.keras.layers.Conv2D(filters=CONV2_FILTERS, kernel_size=CONV2_KERNEL))
model.add(tf.keras.layers.Activation(CONV2_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL2_SIZE, strides=POOL2_STRIDE))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Conv2D(filters=CONV3_FILTERS, kernel_size=CONV3_KERNEL))
model.add(tf.keras.layers.Activation(CONV3_ACTIVATION))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=FC_UNITS, activation=FC_ACTIVATION))
model.add(tf.keras.layers.Dense(units=OUTPUT_UNITS, activation=OUTPUT_ACTIVATION))
model.compile(optimizer=OPTIMIZER, loss=LOSS_CLASS, metrics=[AUC_METRICS, ACCURACY_METRICS, RECALL_METRICS])
model.summary()
hist = model.fit(x=train_set, validation_data=val_set, epochs=EPOCH)
test_sample = image.load_img(TEST_IMAGE_DIR, target_size=(TARGET_WIDTH, TARGET_HEIGHT))
test_sample = image.img_to_array(test_sample)
test_sample = np.expand_dims(test_sample, axis=0)
result = model.predict(test_sample)
if result[0][0] == 1:
    print(PNEUMONIA_TITLE)
else:
    print(NORMAL_TITLE)
code
74059064/cell_14
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
val_gen = ImageDataGenerator(rescale=1.0 / RESCALE)
val_set = val_gen.flow_from_directory(VAL_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
code
74059064/cell_22
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
normal_img = cv2.imread(NORMAL_DIR)
pneumonia_img = cv2.imread(PNEUMONIA_DIR)
normal_img = cv2.resize(normal_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
pneumonia_img = cv2.resize(pneumonia_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
train_gen = ImageDataGenerator(rescale=1.0 / RESCALE, shear_range=SHEAR_RANGE, vertical_flip=VERTICAL_FLIP, horizontal_flip=HORIZONTAL_FLIP, zoom_range=ZOOM_RANGE, validation_split=VALIDATION_SPLIT)
train_set = train_gen.flow_from_directory(TRAIN_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
val_gen = ImageDataGenerator(rescale=1.0 / RESCALE)
val_set = val_gen.flow_from_directory(VAL_DIR, batch_size=BATCH_SIZE, target_size=(TARGET_WIDTH, TARGET_HEIGHT), class_mode=CLASS_MODE)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=CONV1_FILTERS, kernel_size=CONV1_KERNEL, input_shape=[TARGET_WIDTH, TARGET_HEIGHT, COLOR_CHANNEL]))
model.add(tf.keras.layers.Activation(CONV1_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL1_SIZE, strides=POOL1_STRIDE))
model.add(tf.keras.layers.Conv2D(filters=CONV2_FILTERS, kernel_size=CONV2_KERNEL))
model.add(tf.keras.layers.Activation(CONV2_ACTIVATION))
model.add(tf.keras.layers.MaxPool2D(pool_size=POOL2_SIZE, strides=POOL2_STRIDE))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Conv2D(filters=CONV3_FILTERS, kernel_size=CONV3_KERNEL))
model.add(tf.keras.layers.Activation(CONV3_ACTIVATION))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=FC_UNITS, activation=FC_ACTIVATION))
model.add(tf.keras.layers.Dense(units=OUTPUT_UNITS, activation=OUTPUT_ACTIVATION))
model.compile(optimizer=OPTIMIZER, loss=LOSS_CLASS, metrics=[AUC_METRICS, ACCURACY_METRICS, RECALL_METRICS])
model.summary()
hist = model.fit(x=train_set, validation_data=val_set, epochs=EPOCH)
key_list = []
for key in hist.history.keys():
    key_list.append(key)
plt.figure(figsize=(20, 3))
plt.subplot(1, 4, 1)
plt.suptitle(SUPTITLE)
plt.ylabel(LOSS_LABEL)
plt.plot(hist.history[key_list[0]], color='b', label=TRAIN_LOSS_LABEL)
plt.plot(hist.history[key_list[4]], color='r', label=VALIDATION_LOSS_LABEL)
plt.legend(loc=UPPER_RIGHT)
plt.subplot(1, 4, 2)
plt.ylabel(ACCURACY_LABEL)
plt.plot(hist.history[key_list[2]], color='b', label=TRAIN_ACCURACY_LABEL)
plt.plot(hist.history[key_list[6]], color='r', label=VALIDATION_ACCURACY_LABEL)
plt.legend(loc=LOWER_RIGHT)
plt.subplot(1, 4, 3)
plt.ylabel(AUC_LABEL)
plt.plot(hist.history[key_list[1]], color='b', label=TRAIN_AUC_LABEL)
plt.plot(hist.history[key_list[5]], color='r', label=VALIDATION_AUC_LABEL)
plt.legend(loc=LOWER_RIGHT)
plt.subplot(1, 4, 4)
plt.ylabel(RECALL_LABEL)
plt.plot(hist.history[key_list[3]], color='b', label=TRAIN_RECALL_LABEL)
plt.plot(hist.history[key_list[7]], color='r', label=VALIDATION_RECALL_LABEL)
plt.legend(loc=LOWER_RIGHT)
plt.show()
code
74059064/cell_10
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUMONIA/B59DD164-51D5-40DF-A926-6A42DD52EBE8.jpeg'
RESIZE_WIDTH = 256
RESIZE_HEIGHT = 256
NORMAL_TITLE = 'Normal'
PNEUMONIA_TITLE = 'Pneumonia'
BATCH_SIZE = 32
TARGET_WIDTH = 64
TARGET_HEIGHT = 64
CLASS_MODE = 'binary'
RESCALE = 255
SHEAR_RANGE = 0.25
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
ZOOM_RANGE = 0.25
VALIDATION_SPLIT = 0.15
EPOCH = 20
SUPTITLE = 'Train vs Validation'
LOSS_LABEL = 'Loss'
ACCURACY_LABEL = 'Accuracy'
AUC_LABEL = 'ROC-AUC'
RECALL_LABEL = 'Recall'
TRAIN_LOSS_LABEL = 'Training Loss'
VALIDATION_LOSS_LABEL = 'Validation Loss'
TRAIN_ACCURACY_LABEL = 'Training Accuracy'
VALIDATION_ACCURACY_LABEL = 'Validation Accuracy'
TRAIN_AUC_LABEL = 'Training AUC'
VALIDATION_AUC_LABEL = 'Validation AUC'
TRAIN_RECALL_LABEL = 'Training Recall'
VALIDATION_RECALL_LABEL = 'Validation Recall'
UPPER_RIGHT = 'upper right'
LOWER_RIGHT = 'lower right'
COLOR_CHANNEL = 3
CONV1_FILTERS = 48
CONV1_KERNEL = 3
CONV1_ACTIVATION = 'relu'
POOL1_SIZE = 3
POOL1_STRIDE = 1
CONV2_FILTERS = 32
CONV2_KERNEL = 3
CONV2_ACTIVATION = 'relu'
DROPOUT = 0.3
POOL2_SIZE = 2
POOL2_STRIDE = 1
CONV3_FILTERS = 16
CONV3_KERNEL = 3
CONV3_ACTIVATION = 'relu'
FC_ACTIVATION = 'relu'
FC_UNITS = 128
OUTPUT_ACTIVATION = 'sigmoid'
OUTPUT_UNITS = 1
OPTIMIZER = 'adam'
LOSS_CLASS = 'binary_crossentropy'
ACCURACY_METRICS = 'accuracy'
AUC_METRICS = tf.keras.metrics.AUC()
RECALL_METRICS = tf.keras.metrics.Recall()
normal_img = cv2.imread(NORMAL_DIR)
pneumonia_img = cv2.imread(PNEUMONIA_DIR)
normal_img = cv2.resize(normal_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
pneumonia_img = cv2.resize(pneumonia_img, (RESIZE_WIDTH, RESIZE_HEIGHT))
plt.subplot(1, 2, 1)
plt.imshow(normal_img)
plt.title(NORMAL_TITLE)
plt.subplot(1, 2, 2)
plt.imshow(pneumonia_img)
plt.title(PNEUMONIA_TITLE)
plt.show()
code
90111632/cell_9
[ "image_output_1.png" ]
from nltk.corpus import stopwords
from textblob import Word
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import pandas as pd
import seaborn as sns
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access_token_secret = 'e4F74LVCQ7BWHZ0HPPTF4Tz1laeFJ0a341LPcLQ3jpqvX'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets = api.search_tweets(q='BTC', lang='en', count=200)
def hashtag_df(tweets):
    import pandas as pd
    id_list = [tweet.id for tweet in tweets]
    dataframe = pd.DataFrame(id_list, columns=['id'])
    dataframe['user'] = [tweet.author.screen_name for tweet in tweets]
    dataframe['text'] = [tweet.text for tweet in tweets]
    dataframe['hashtags'] = [tweet.entities.get('hashtags') for tweet in tweets]
    return dataframe
df = hashtag_df(tweets)
df = df.drop_duplicates('user', keep='first')
df.drop('id', axis=1, inplace=True)
df.index = df['user']
df.drop('user', axis=1, inplace=True)
df['text'] = df['text'].apply(lambda x: ' '.join((x.lower() for x in x.split())))
df['text'] = df['text'].str.replace('[^\\w\\s]', '')
df['text'] = df['text'].str.replace('\\d', '')
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
sw = stopwords.words('english')
df['text'] = df['text'].apply(lambda x: ' '.join((x for x in x.split() if x not in sw)))
from textblob import Word
nltk.download('wordnet')
df['text'] = df['text'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
df['text'] = df['text'].str.replace('rt', '')
freq_df = df['text'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
freq_df.columns = ['Words', 'Frequency']
freq_df = freq_df.sort_values(by='Frequency', ascending=False)
freq_df = freq_df[freq_df.Frequency > freq_df.Frequency.mean() + freq_df.Frequency.std()]
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(x='Frequency', y='Words', data=freq_df.sort_values(by='Frequency', ascending=False))
plt.title('Frequency')
plt.tight_layout()
plt.show()
code
90111632/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access_token_secret = 'e4F74LVCQ7BWHZ0HPPTF4Tz1laeFJ0a341LPcLQ3jpqvX'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets = api.search_tweets(q='BTC', lang='en', count=200)
def hashtag_df(tweets):
    import pandas as pd
    id_list = [tweet.id for tweet in tweets]
    dataframe = pd.DataFrame(id_list, columns=['id'])
    dataframe['user'] = [tweet.author.screen_name for tweet in tweets]
    dataframe['text'] = [tweet.text for tweet in tweets]
    dataframe['hashtags'] = [tweet.entities.get('hashtags') for tweet in tweets]
    return dataframe
df = hashtag_df(tweets)
df = df.drop_duplicates('user', keep='first')
df.drop('id', axis=1, inplace=True)
df.index = df['user']
df.drop('user', axis=1, inplace=True)
df.head()
code
90111632/cell_1
[ "text_plain_output_1.png" ]
!pip install tweepy
code
90111632/cell_7
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from textblob import Word
import nltk
import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access_token_secret = 'e4F74LVCQ7BWHZ0HPPTF4Tz1laeFJ0a341LPcLQ3jpqvX'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets = api.search_tweets(q='BTC', lang='en', count=200)
def hashtag_df(tweets):
    import pandas as pd
    id_list = [tweet.id for tweet in tweets]
    dataframe = pd.DataFrame(id_list, columns=['id'])
    dataframe['user'] = [tweet.author.screen_name for tweet in tweets]
    dataframe['text'] = [tweet.text for tweet in tweets]
    dataframe['hashtags'] = [tweet.entities.get('hashtags') for tweet in tweets]
    return dataframe
df = hashtag_df(tweets)
df = df.drop_duplicates('user', keep='first')
df.drop('id', axis=1, inplace=True)
df.index = df['user']
df.drop('user', axis=1, inplace=True)
df['text'] = df['text'].apply(lambda x: ' '.join((x.lower() for x in x.split())))
df['text'] = df['text'].str.replace('[^\\w\\s]', '')
df['text'] = df['text'].str.replace('\\d', '')
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
sw = stopwords.words('english')
df['text'] = df['text'].apply(lambda x: ' '.join((x for x in x.split() if x not in sw)))
from textblob import Word
nltk.download('wordnet')
df['text'] = df['text'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
df['text'] = df['text'].str.replace('rt', '')
code
90111632/cell_8
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from textblob import Word
import nltk
import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access_token_secret = 'e4F74LVCQ7BWHZ0HPPTF4Tz1laeFJ0a341LPcLQ3jpqvX'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets = api.search_tweets(q='BTC', lang='en', count=200)
def hashtag_df(tweets):
    import pandas as pd
    id_list = [tweet.id for tweet in tweets]
    dataframe = pd.DataFrame(id_list, columns=['id'])
    dataframe['user'] = [tweet.author.screen_name for tweet in tweets]
    dataframe['text'] = [tweet.text for tweet in tweets]
    dataframe['hashtags'] = [tweet.entities.get('hashtags') for tweet in tweets]
    return dataframe
df = hashtag_df(tweets)
df = df.drop_duplicates('user', keep='first')
df.drop('id', axis=1, inplace=True)
df.index = df['user']
df.drop('user', axis=1, inplace=True)
df['text'] = df['text'].apply(lambda x: ' '.join((x.lower() for x in x.split())))
df['text'] = df['text'].str.replace('[^\\w\\s]', '')
df['text'] = df['text'].str.replace('\\d', '')
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
sw = stopwords.words('english')
df['text'] = df['text'].apply(lambda x: ' '.join((x for x in x.split() if x not in sw)))
from textblob import Word
nltk.download('wordnet')
df['text'] = df['text'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
df['text'] = df['text'].str.replace('rt', '')
freq_df = df['text'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
freq_df.columns = ['Words', 'Frequency']
freq_df = freq_df.sort_values(by='Frequency', ascending=False)
freq_df = freq_df[freq_df.Frequency > freq_df.Frequency.mean() + freq_df.Frequency.std()]
freq_df.head()
code
17135958/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image
from keras import optimizers
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l1,l2,l1_l2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
def bounding_box(image):
    bpath = root_annots + str(breed_map[image.split('_')[0]]) + '/' + str(image.split('.')[0])
    tree = ET.parse(bpath)
    root = tree.getroot()
    objects = root.findall('object')
    for o in objects:
        bndbox = o.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
    return (xmin, ymin, xmax, ymax)
num_rows = 5
num_cols = 9
num_images_to_show = num_rows * num_cols
selected_images = np.random.choice(all_images, size=num_images_to_show, replace=False)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    plt.axis('off')
num_images_in_dataset = 16384
image_dimention = 64
resize_shape = (image_dimention, image_dimention)
selected_images = np.random.choice(all_images, size=num_images_in_dataset, replace=False)
image_dataset_4D_matrix = np.zeros((image_dimention, image_dimention, 3, num_images_in_dataset), dtype=np.uint8)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    resized_image = tform.resize(np.array(cropped_image), resize_shape, preserve_range=True).astype(np.uint8)
    image_dataset_4D_matrix[:, :, :, k] = resized_image
from keras.models import Model
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.regularizers import l1, l2, l1_l2
from keras import optimizers
kernel_reg = 1e-07
activity_reg = 1e-07
input_image = Input(shape=(64, 64, 3), name='input_image')
x = Conv2D(32, (3, 3), activation='relu', padding='valid', name='encoder_conv1')(input_image)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool1')(x)
x = BatchNormalization(name='batchnorm_2')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='valid', name='encoder_conv2')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool2')(x)
x = BatchNormalization(name='batchnorm_3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv3')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool3')(x)
x = BatchNormalization(name='batchnorm_4')(x)
x = Conv2D(1024, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv4')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool4')(x)
x = BatchNormalization(name='batchnorm_5')(x)
x = Conv2D(2048, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv5')(x)
x = MaxPooling2D((3, 3), padding='same', name='encoder_pool5')(x)
x = BatchNormalization(name='batchnorm_6')(x)
image_representation = Conv2D(2048, (1, 1), activation='relu', kernel_regularizer=l2(kernel_reg), activity_regularizer=l1(activity_reg), name='encoder_fc6')(x)
image_representation = GaussianDropout(rate=0.1)(image_representation)
image_representation = GaussianNoise(stddev=0.1)(image_representation)
encoder = Model(input_image, image_representation, name='encoder')
external_image_representation = Input(shape=(1, 1, 2048), name='extenral_image_rep')
x = UpSampling2D((5, 5))(external_image_representation)
x = BatchNormalization(name='decoder_BN_1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv1')(x)
x = UpSampling2D((3, 3), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv2')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv3')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_4')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv4')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_5')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv5')(x)
output_image = Conv2D(3, (3, 3), activation='linear', padding='same', name='generated_image')(x)
decoder = Model(external_image_representation, output_image, name='decoder')
autoencoder = Model(input_image, decoder(encoder(input_image)))
autoencoder.summary()
image_dataset_4D_matrix
normlized_X = image_dataset_4D_matrix.astype(np.float32)
normlized_X = normlized_X - normlized_X.mean(axis=3, keepdims=True)
normlized_X = normlized_X / normlized_X.std(axis=3, keepdims=True)
normlized_X = np.transpose(normlized_X, [3, 0, 1, 2])
num_epochs = 64
batch_size = 32
learning_rate = 0.0003
loss_to_use = 'mse'
optimizer_to_use = optimizers.Nadam(lr=learning_rate)
autoencoder.compile(optimizer=optimizer_to_use, loss=loss_to_use)
history = autoencoder.fit(x=normlized_X, y=normlized_X, batch_size=batch_size, epochs=num_epochs, validation_split=0.125)
code
17135958/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
def bounding_box(image):
    bpath = root_annots + str(breed_map[image.split('_')[0]]) + '/' + str(image.split('.')[0])
    tree = ET.parse(bpath)
    root = tree.getroot()
    objects = root.findall('object')
    for o in objects:
        bndbox = o.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
    return (xmin, ymin, xmax, ymax)
num_rows = 5
num_cols = 9
num_images_to_show = num_rows * num_cols
selected_images = np.random.choice(all_images, size=num_images_to_show, replace=False)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    plt.axis('off')
num_images_in_dataset = 16384
image_dimention = 64
resize_shape = (image_dimention, image_dimention)
selected_images = np.random.choice(all_images, size=num_images_in_dataset, replace=False)
image_dataset_4D_matrix = np.zeros((image_dimention, image_dimention, 3, num_images_in_dataset), dtype=np.uint8)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    resized_image = tform.resize(np.array(cropped_image), resize_shape, preserve_range=True).astype(np.uint8)
    image_dataset_4D_matrix[:, :, :, k] = resized_image
print('finished collecting dataset')
code
17135958/cell_6
[ "text_plain_output_1.png" ]
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l1,l2,l1_l2
from keras.models import Model
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.regularizers import l1, l2, l1_l2
from keras import optimizers
kernel_reg = 1e-07
activity_reg = 1e-07
input_image = Input(shape=(64, 64, 3), name='input_image')
x = Conv2D(32, (3, 3), activation='relu', padding='valid', name='encoder_conv1')(input_image)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool1')(x)
x = BatchNormalization(name='batchnorm_2')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='valid', name='encoder_conv2')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool2')(x)
x = BatchNormalization(name='batchnorm_3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv3')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool3')(x)
x = BatchNormalization(name='batchnorm_4')(x)
x = Conv2D(1024, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv4')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool4')(x)
x = BatchNormalization(name='batchnorm_5')(x)
x = Conv2D(2048, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv5')(x)
x = MaxPooling2D((3, 3), padding='same', name='encoder_pool5')(x)
x = BatchNormalization(name='batchnorm_6')(x)
image_representation = Conv2D(2048, (1, 1), activation='relu', kernel_regularizer=l2(kernel_reg), activity_regularizer=l1(activity_reg), name='encoder_fc6')(x)
image_representation = GaussianDropout(rate=0.1)(image_representation)
image_representation = GaussianNoise(stddev=0.1)(image_representation)
encoder = Model(input_image, image_representation, name='encoder')
external_image_representation = Input(shape=(1, 1, 2048), name='extenral_image_rep')
x = UpSampling2D((5, 5))(external_image_representation)
x = BatchNormalization(name='decoder_BN_1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv1')(x)
x = UpSampling2D((3, 3), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv2')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv3')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_4')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv4')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_5')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv5')(x)
output_image = Conv2D(3, (3, 3), activation='linear', padding='same', name='generated_image')(x)
decoder = Model(external_image_representation, output_image, name='decoder')
autoencoder = Model(input_image, decoder(encoder(input_image)))
autoencoder.summary()
code
17135958/cell_2
[ "image_output_1.png" ]
import glob
import os
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
print(f'Total images : {len(all_images)}')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
print(f'Total annotation : {len(annotation)}')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
print(f'Total Breeds : {len(breed_map)}')
code
17135958/cell_8
[ "text_plain_output_1.png" ]
from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
def bounding_box(image):
    bpath = root_annots + str(breed_map[image.split('_')[0]]) + '/' + str(image.split('.')[0])
    tree = ET.parse(bpath)
    root = tree.getroot()
    objects = root.findall('object')
    for o in objects:
        bndbox = o.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
    return (xmin, ymin, xmax, ymax)
num_rows = 5
num_cols = 9
num_images_to_show = num_rows * num_cols
selected_images = np.random.choice(all_images, size=num_images_to_show, replace=False)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    plt.axis('off')
num_images_in_dataset = 16384
image_dimention = 64
resize_shape = (image_dimention, image_dimention)
selected_images = np.random.choice(all_images, size=num_images_in_dataset, replace=False)
image_dataset_4D_matrix = np.zeros((image_dimention, image_dimention, 3, num_images_in_dataset), dtype=np.uint8)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    resized_image = tform.resize(np.array(cropped_image), resize_shape, preserve_range=True).astype(np.uint8)
    image_dataset_4D_matrix[:, :, :, k] = resized_image
image_dataset_4D_matrix
normlized_X = image_dataset_4D_matrix.astype(np.float32)
print(normlized_X.shape)
normlized_X = normlized_X - normlized_X.mean(axis=3, keepdims=True)
print(normlized_X.shape)
normlized_X = normlized_X / normlized_X.std(axis=3, keepdims=True)
print(normlized_X.shape)
normlized_X = np.transpose(normlized_X, [3, 0, 1, 2])
print(normlized_X.shape)
code
17135958/cell_3
[ "text_plain_output_1.png" ]
from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
def bounding_box(image):
    bpath = root_annots + str(breed_map[image.split('_')[0]]) + '/' + str(image.split('.')[0])
    tree = ET.parse(bpath)
    root = tree.getroot()
    objects = root.findall('object')
    for o in objects:
        bndbox = o.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
    return (xmin, ymin, xmax, ymax)
num_rows = 5
num_cols = 9
num_images_to_show = num_rows * num_cols
selected_images = np.random.choice(all_images, size=num_images_to_show, replace=False)
plt.figure(figsize=(30, 16))
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    plt.subplot(num_rows, num_cols, k + 1)
    plt.imshow(cropped_image)
    plt.axis('off')
code
17135958/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image
from keras import optimizers
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l1,l2,l1_l2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += glob.glob(b + '/*')
breed_map = {}
for annot in annotation:
    breed = annot.split('/')[-2]
    index = breed.split('-')[0]
    breed_map.setdefault(index, breed)
def bounding_box(image):
    bpath = root_annots + str(breed_map[image.split('_')[0]]) + '/' + str(image.split('.')[0])
    tree = ET.parse(bpath)
    root = tree.getroot()
    objects = root.findall('object')
    for o in objects:
        bndbox = o.find('bndbox')
        xmin = int(bndbox.find('xmin').text)
        ymin = int(bndbox.find('ymin').text)
        xmax = int(bndbox.find('xmax').text)
        ymax = int(bndbox.find('ymax').text)
    return (xmin, ymin, xmax, ymax)
num_rows = 5
num_cols = 9
num_images_to_show = num_rows * num_cols
selected_images = np.random.choice(all_images, size=num_images_to_show, replace=False)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    plt.axis('off')
num_images_in_dataset = 16384
image_dimention = 64
resize_shape = (image_dimention, image_dimention)
selected_images = np.random.choice(all_images, size=num_images_in_dataset, replace=False)
image_dataset_4D_matrix = np.zeros((image_dimention, image_dimention, 3, num_images_in_dataset), dtype=np.uint8)
for k, image_filename in enumerate(selected_images):
    bbox = bounding_box(image_filename)
    orig_image = Image.open(os.path.join(root_images, image_filename))
    cropped_image = orig_image.crop(bbox)
    resized_image = tform.resize(np.array(cropped_image), resize_shape, preserve_range=True).astype(np.uint8)
    image_dataset_4D_matrix[:, :, :, k] = resized_image
from keras.models import Model
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.regularizers import l1, l2, l1_l2
from keras import optimizers
kernel_reg = 1e-07
activity_reg = 1e-07
input_image = Input(shape=(64, 64, 3), name='input_image')
x = Conv2D(32, (3, 3), activation='relu', padding='valid', name='encoder_conv1')(input_image)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool1')(x)
x = BatchNormalization(name='batchnorm_2')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='valid', name='encoder_conv2')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool2')(x)
x = BatchNormalization(name='batchnorm_3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv3')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool3')(x)
x = BatchNormalization(name='batchnorm_4')(x)
x = Conv2D(1024, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv4')(x)
x = MaxPooling2D((2, 2), padding='same', name='encoder_pool4')(x)
x = BatchNormalization(name='batchnorm_5')(x)
x = Conv2D(2048, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='encoder_conv5')(x)
x = MaxPooling2D((3, 3), padding='same', name='encoder_pool5')(x)
x = BatchNormalization(name='batchnorm_6')(x)
image_representation = Conv2D(2048, (1, 1), activation='relu', kernel_regularizer=l2(kernel_reg), activity_regularizer=l1(activity_reg), name='encoder_fc6')(x)
image_representation = GaussianDropout(rate=0.1)(image_representation)
image_representation = GaussianNoise(stddev=0.1)(image_representation)
encoder = Model(input_image, image_representation, name='encoder')
external_image_representation = Input(shape=(1, 1, 2048), name='extenral_image_rep')
x = UpSampling2D((5, 5))(external_image_representation)
x = BatchNormalization(name='decoder_BN_1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv1')(x)
x = UpSampling2D((3, 3), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv2')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='valid', kernel_regularizer=l2(kernel_reg), name='decoder_conv3')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_4')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv4')(x)
x = UpSampling2D((2, 2), interpolation='bilinear')(x)
x = BatchNormalization(name='decoder_BN_5')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(kernel_reg), name='decoder_conv5')(x)
output_image = Conv2D(3, (3, 3), activation='linear', padding='same', name='generated_image')(x)
decoder = Model(external_image_representation, output_image, name='decoder')
autoencoder = Model(input_image, decoder(encoder(input_image)))
autoencoder.summary()
image_dataset_4D_matrix
normlized_X = image_dataset_4D_matrix.astype(np.float32)
normlized_X = normlized_X - normlized_X.mean(axis=3, keepdims=True)
normlized_X = normlized_X / normlized_X.std(axis=3, keepdims=True)
normlized_X = np.transpose(normlized_X, [3, 0, 1, 2])
num_epochs = 64
batch_size = 32
learning_rate = 0.0003
loss_to_use = 'mse'
optimizer_to_use = optimizers.Nadam(lr=learning_rate)
autoencoder.compile(optimizer=optimizer_to_use, loss=loss_to_use)
history = autoencoder.fit(x=normlized_X, y=normlized_X, batch_size=batch_size, epochs=num_epochs, validation_split=0.125)
epoch_number = np.arange(1, num_epochs + 1)
plt.figure(figsize=(12, 6))
plt.plot(epoch_number, history.history['loss'], 'b')
plt.plot(epoch_number, history.history['val_loss'], 'g')
plt.legend(['train', 'valid'], fontsize=18)
code
130000382/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
reviews = csv['review'].tolist()
ratings = csv['rating'].tolist()
features = {}
top200 = sorted(features, key=features.get, reverse=True)[:1000]
top400 = sorted(features, key=features.get, reverse=True)[:400]
train_test_split = 0.8
reviews_train = reviews[:round(train_test_split * len(reviews))]
reviews_test = reviews[round(train_test_split * len(reviews)):]
ratings_train = ratings[:round(train_test_split * len(ratings))]
ratings_test = ratings[round(train_test_split * len(ratings)):]
M200 = []
M400 = []
for review in reviews_train:
    c = Counter([review[i:i + 3] for i in range(len(review) - 2)])
    M200.append([c[g3] for g3 in top200])
    M400.append([c[g3] for g3 in top400])
M200 = np.array(M200)
M400 = np.array(M400)
M200_test = []
M400_test = []
for review in reviews_test:
    c = Counter([review[i:i + 3] for i in range(len(review) - 2)])
    M200_test.append([c[g3] for g3 in top200])
    M400_test.append([c[g3] for g3 in top400])
M200_test = np.array(M200_test)
M400_test = np.array(M400_test)
M200_test.shape
code
130000382/cell_2
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
from collections import Counter
from sklearn import preprocessing
import torch
from torch import nn
from torchvision import transforms, datasets
code
130000382/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130000382/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
reviews = csv['review'].tolist()
ratings = csv['rating'].tolist()
features = {}
train_test_split = 0.8
reviews_train = reviews[:round(train_test_split * len(reviews))]
reviews_test = reviews[round(train_test_split * len(reviews)):]
ratings_train = ratings[:round(train_test_split * len(ratings))]
ratings_test = ratings[round(train_test_split * len(ratings)):]
print('Train: {} reviews, {} ratings'.format(len(reviews_train), len(ratings_train)))
print('Test: {} reviews, {} ratings'.format(len(reviews_test), len(ratings_test)))
code
130000382/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
print('Length: {}'.format(len(csv)))
csv.head(10)
code
130000382/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from collections import Counter
from sklearn import preprocessing
from torch import nn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
reviews = csv['review'].tolist()
ratings = csv['rating'].tolist()
features = {}
top200 = sorted(features, key=features.get, reverse=True)[:1000]
top400 = sorted(features, key=features.get, reverse=True)[:400]
train_test_split = 0.8
reviews_train = reviews[:round(train_test_split * len(reviews))]
reviews_test = reviews[round(train_test_split * len(reviews)):]
ratings_train = ratings[:round(train_test_split * len(ratings))]
ratings_test = ratings[round(train_test_split * len(ratings)):]
M200 = []
M400 = []
for review in reviews_train:
    c = Counter([review[i:i + 3] for i in range(len(review) - 2)])
    M200.append([c[g3] for g3 in top200])
    M400.append([c[g3] for g3 in top400])
M200 = np.array(M200)
M400 = np.array(M400)
M200_test = []
M400_test = []
for review in reviews_test:
    c = Counter([review[i:i + 3] for i in range(len(review) - 2)])
    M200_test.append([c[g3] for g3 in top200])
    M400_test.append([c[g3] for g3 in top400])
M200_test = np.array(M200_test)
M400_test = np.array(M400_test)
scl = preprocessing.StandardScaler()
M200_s = torch.tensor(scl.fit_transform(M200))
M400_s = torch.tensor(scl.fit_transform(M400))
M200_test_s = torch.tensor(scl.fit_transform(M200_test))
M400_test_s = torch.tensor(scl.fit_transform(M400_test))
ratings_train = torch.tensor(ratings_train).long()
ratings_test = torch.tensor(ratings_test).long()
model200 = nn.Sequential(nn.Linear(1000, 512), nn.ReLU(), nn.Dropout(0.8), nn.Linear(512, 32), nn.ReLU(), nn.Dropout(0.8), nn.Linear(32, 5))
learning_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model200.parameters(), lr=learning_rate)
epochs = 200
for epoch in range(epochs):
    model200.train()
    pred = model200(M200_s.float())
    loss = loss_fn(pred, ratings_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    model200.eval()
    if epoch % 30 == 0:
        train = (model200(M200_s.data.float()).argmax(1) == ratings_train).sum().item() / len(M200)
        test = (model200(M200_test_s.data.float()).argmax(1) == ratings_test).sum().item() / len(M200_test)
        print('Epoch {}.'.format(epoch + 1))
        print('Loss: {:.3f}'.format(loss.item()))
        print('Train Score: {:.3f}'.format(train))
        print('Test Score: {:.3f}\n'.format(test))
code
106201252/cell_4
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_data))
val_size = round(0.2 * len(train_data))
train_features = train_data[:train_size]
val_features = train_data[train_size:]
train_labels = train_features.pop('label')
val_labels = val_features.pop('label')
model = KNeighborsClassifier(n_neighbors=3)
train_matrix = csr_matrix(train_features.values)
model.fit(train_matrix, train_labels.values)
print('Accuracy:')
print(model.score(val_features.values, val_labels.values))
code
106201252/cell_3
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_data))
val_size = round(0.2 * len(train_data))
train_features = train_data[:train_size]
val_features = train_data[train_size:]
train_labels = train_features.pop('label')
val_labels = val_features.pop('label')
model = KNeighborsClassifier(n_neighbors=3)
train_matrix = csr_matrix(train_features.values)
model.fit(train_matrix, train_labels.values)
code
106201252/cell_5
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import random
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_data))
val_size = round(0.2 * len(train_data))
train_features = train_data[:train_size]
val_features = train_data[train_size:]
train_labels = train_features.pop('label')
val_labels = val_features.pop('label')
model = KNeighborsClassifier(n_neighbors=3)
train_matrix = csr_matrix(train_features.values)
model.fit(train_matrix, train_labels.values)
def show_image(img):
    plt.figure()
    plt.imshow(img, cmap=plt.cm.binary)
    plt.colorbar()
    plt.grid(False)
    plt.show()
def predict(model, image_num):
    image = test_data.iloc[image_num, 0:].values.reshape(28, 28)
    image_matrix = csr_matrix(test_data.iloc[image_num, 0:].values)
    class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    prediction = model.predict(image_matrix)
    print('Prediction: ' + str(prediction[0]))
    show_image(image)
def make_n_predictions(model, n):
    for i in range(n):
        random_image = random.randint(0, 27999)
        predict(model, random_image)
make_n_predictions(model, 5)
code
122251379/cell_13
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
code
122251379/cell_9
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
X = A - B
X
code
122251379/cell_20
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
code
122251379/cell_26
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A.update(B)
A
code
122251379/cell_2
[ "text_plain_output_1.png" ]
s = set()
type(s)
s = {'INDIA', 'SRILANKA', 'PAKISTAN'}
type(s)
code
122251379/cell_19
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
code
122251379/cell_1
[ "text_plain_output_1.png" ]
s = set()
type(s)
code
122251379/cell_7
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
code
122251379/cell_18
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A
code
122251379/cell_28
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A.update(B)
A.clear()
A
code
122251379/cell_16
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A
code
122251379/cell_24
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A
code
122251379/cell_14
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
sym_dif = B ^ A
sym_dif
code
122251379/cell_22
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
A
code
122251379/cell_10
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
Y = B - A
Y
code
122251379/cell_12
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
code
122251379/cell_5
[ "text_plain_output_1.png" ]
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
code
2001469/cell_9
[ "image_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
labels_test = test_labels.values
labels_test = labels_test[:, 1]
test_data = test_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
test_data.Age = test_data.Age.fillna(np.mean(train_data.Age))
test_data.Sex = test_data.Sex.apply(lambda x: 1 if x == 'male' else 0)
test_data = test_data.values
test_data = test_data[:, 1:]
features_test = test_data
train_data = train_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
train_data['Age'] = train_data['Age'].fillna(np.mean(train_data.Age))
train_data['Sex'] = train_data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
features_all = train_data.values
labels_all = features_all[:, 1]
features_all = features_all[:, 2:]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
classifiers = [KNeighborsClassifier(), AdaBoostClassifier(), RandomForestClassifier(), GaussianNB(), LinearDiscriminantAnalysis(), GradientBoostingClassifier(), DecisionTreeClassifier()]
uni, cnt = np.unique(labels_test, return_counts=True)
res_cols = ['Name', 'Accuracy']
result = pd.DataFrame(columns=res_cols)
for clf in classifiers:
    clf.fit(features_all, labels_all)
    name = clf.__class__.__name__
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    frame = pd.DataFrame([[name, acc * 100]], columns=res_cols)
    result = result.append(frame)
classifiers = [LinearSVC(), NuSVC(), SVC(kernel='rbf', C=0.25), SVC(kernel='linear', C=0.25), SVC(kernel='rbf', C=1), SVC(kernel='linear', C=1), SVC(kernel='rbf', C=5), SVC(kernel='linear', C=5)]
uni, cnt = np.unique(labels_test, return_counts=True)
print('Actual Value of the result ', dict(zip(uni, cnt)))
for clf in classifiers:
    clf.fit(features_all, labels_all)
    print('-' * 50)
    name = clf.__class__.__name__  # name of the SVM variant being evaluated
    print(name)
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    print('Result ->', acc * 100, '%')
    print('-' * 50)
code
2001469/cell_6
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
labels_test = test_labels.values
labels_test = labels_test[:, 1]
test_data = test_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
test_data.Age = test_data.Age.fillna(np.mean(train_data.Age))
test_data.Sex = test_data.Sex.apply(lambda x: 1 if x == 'male' else 0)
test_data = test_data.values
test_data = test_data[:, 1:]
features_test = test_data
train_data = train_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
train_data['Age'] = train_data['Age'].fillna(np.mean(train_data.Age))
train_data['Sex'] = train_data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train_data.head()
code
2001469/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2001469/cell_8
[ "text_plain_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
labels_test = test_labels.values
labels_test = labels_test[:, 1]
test_data = test_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
test_data.Age = test_data.Age.fillna(np.mean(train_data.Age))
test_data.Sex = test_data.Sex.apply(lambda x: 1 if x == 'male' else 0)
test_data = test_data.values
test_data = test_data[:, 1:]
features_test = test_data
train_data = train_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
train_data['Age'] = train_data['Age'].fillna(np.mean(train_data.Age))
train_data['Sex'] = train_data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
features_all = train_data.values
labels_all = features_all[:, 1]
features_all = features_all[:, 2:]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
classifiers = [KNeighborsClassifier(), AdaBoostClassifier(), RandomForestClassifier(), GaussianNB(), LinearDiscriminantAnalysis(), GradientBoostingClassifier(), DecisionTreeClassifier()]
uni, cnt = np.unique(labels_test, return_counts=True)
res_cols = ['Name', 'Accuracy']
result = pd.DataFrame(columns=res_cols)
print('Actual Value of the result ', dict(zip(uni, cnt)))
for clf in classifiers:
    clf.fit(features_all, labels_all)
    print('-' * 50)
    name = clf.__class__.__name__
    print(name)
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    print('Result ->', acc * 100, '%')
    frame = pd.DataFrame([[name, acc * 100]], columns=res_cols)
    result = result.append(frame)
    print('-' * 50)
code
2001469/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
train_data.head()
code
2001469/cell_10
[ "text_html_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns #Visualizations
from matplotlib import pyplot as plt # needed for the axis labels and plt.show() below
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
labels_test = test_labels.values
labels_test = labels_test[:, 1]
test_data = test_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
test_data.Age = test_data.Age.fillna(np.mean(train_data.Age))
test_data.Sex = test_data.Sex.apply(lambda x: 1 if x == 'male' else 0)
test_data = test_data.values
test_data = test_data[:, 1:]
features_test = test_data
train_data = train_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
train_data['Age'] = train_data['Age'].fillna(np.mean(train_data.Age))
train_data['Sex'] = train_data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
features_all = train_data.values
labels_all = features_all[:, 1]
features_all = features_all[:, 2:]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
classifiers = [KNeighborsClassifier(), AdaBoostClassifier(), RandomForestClassifier(), GaussianNB(), LinearDiscriminantAnalysis(), GradientBoostingClassifier(), DecisionTreeClassifier()]
uni, cnt = np.unique(labels_test, return_counts=True)
res_cols = ['Name', 'Accuracy']
result = pd.DataFrame(columns=res_cols)
for clf in classifiers:
    clf.fit(features_all, labels_all)
    name = clf.__class__.__name__
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    frame = pd.DataFrame([[name, acc * 100]], columns=res_cols)
    result = result.append(frame)
sns.set_color_codes('muted')
sns.barplot(x='Accuracy', y='Name', data=result, color='g')
plt.xlabel('Accuracy')
plt.ylabel('Classifier Name')
plt.title('Accuracy Graph')
plt.show()
code
73067530/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Alien', 'Star Wars', 'Casablanca', 'Titanic'])
from numpy.linalg import svd
U, S, Vt = svd(M, full_matrices=True)  # full SVD of the utility matrix; U, S, Vt are used below
Sigma = np.zeros((7, 5), dtype=float)
Sigma[:5, :5] = np.diag(S)
np.set_printoptions(precision=2)
U = U[:, 0:2]
print(f'U:\n{U}')
Sigma = np.zeros((2, 2), dtype=float)
Sigma[:2, :2] = np.diag(S[:2])
print(f'Sigma:\n{Sigma}')
Vt = Vt[0:2, :]
print(f'Vt:\n{Vt}')
print(f'Reconstructed data:\n {np.dot(U, np.dot(Sigma, Vt))}')
code
73067530/cell_11
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Alien', 'Star Wars', 'Casablanca', 'Titanic'])
from numpy.linalg import svd
U, S, Vt = svd(M, full_matrices=True)  # full SVD of the utility matrix; U, S, Vt are used below
Sigma = np.zeros((7, 5), dtype=float)
Sigma[:5, :5] = np.diag(S)
U = U[:, 0:2]
Sigma = np.zeros((2, 2), dtype=float)
Sigma[:2, :2] = np.diag(S[:2])
Vt = Vt[0:2, :]
q = np.array([4, 0, 0, 0, 0])
V = Vt.transpose()
print(f'Query: q={q}')
qV = np.dot(q, V)
print(f'Map to concept space: qV={qV}')
print(f'Map back to movie space by multiplying by Vt = {np.dot(qV, Vt)}')
code
73067530/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Alien', 'Star Wars', 'Casablanca', 'Titanic'])
from numpy.linalg import svd
U, S, Vt = svd(M, full_matrices=True)  # full SVD of the utility matrix; U, S, Vt are used below
np.set_printoptions(precision=2)
Sigma = np.zeros((7, 5), dtype=float)
Sigma[:5, :5] = np.diag(S)
print(f'Sigma:\n {Sigma}')
print('Reconstructed data: U * Sigma * Vt')
print(np.dot(U, np.dot(Sigma, Vt)))
code
73067530/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
np.set_printoptions(precision=1)
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Alien', 'Star Wars', 'Casablanca', 'Titanic'])
d.head(7)
code
73067530/cell_5
[ "text_plain_output_1.png" ]
from numpy.linalg import svd
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Alien', 'Star Wars', 'Casablanca', 'Titanic'])
from numpy.linalg import svd
U, S, Vt = svd(M, full_matrices=True)
print(f'U:\n{U}')
print(f'S used for building Sigma:\n{S}')
print(f'Vt (already transposed):\n{Vt}')
code
89136755/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train_df = pd.read_csv('/kaggle/input/osgdxaspectcapital/train.csv', index_col=0)
X_test = pd.read_csv('/kaggle/input/osgdxaspectcapital/test.csv', index_col=0)
X_train = train_df[[c for c in train_df if c != 'y']]
y_train = train_df['y'].values
sample = X_train.sample(n=1)
sample_market = sample['Market'].values[0]
sample_series = sample.iloc[:, 1:].T
sample_series.index = pd.to_datetime(sample_series.index, format='Time_%H_%M_%p').time
sample_series.plot()
code
90117670/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
code
90117670/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_equipment.dtypes
code
90117670/cell_19
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
x_data = ['military auto', 'APC']
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv', parse_dates=True)
russian_personnel
russian_personnel = russian_personnel.drop('personnel*', axis=1)
russian_personnel
from sklearn.impute import SimpleImputer
russian_personnel_copy = russian_personnel.drop('date', axis=1)
my_imputer = SimpleImputer()
imputed_data = pd.DataFrame(my_imputer.fit_transform(russian_personnel_copy))
imputed_data.columns = russian_personnel_copy.columns
imputed_data
russian_personnel_copy = russian_personnel.fillna(0)
russian_personnel_copy
plt.figure(figsize=(20, 6))
sns.lineplot(data=russian_personnel_copy, x='date', y='personnel')
plt.show()
code
90117670/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90117670/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)  # equipment losses, as loaded in the other cells
russian_equipment_no_day = russian_equipment.drop('day', axis=1)
russian_equipment_no_day.head()
sns.set_style('darkgrid')
plt.figure(figsize=(20, 9))
plt.title('Russian Equipment Losses')
plt.xlabel('Date')
plt.ylabel('Asset')
sns.lineplot(data=russian_equipment_no_day)
plt.show()
code
90117670/cell_15
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv', parse_dates=True)
russian_personnel
russian_personnel = russian_personnel.drop('personnel*', axis=1)
russian_personnel
from sklearn.impute import SimpleImputer
russian_personnel_copy = russian_personnel.drop('date', axis=1)
my_imputer = SimpleImputer()
imputed_data = pd.DataFrame(my_imputer.fit_transform(russian_personnel_copy))
imputed_data.columns = russian_personnel_copy.columns
imputed_data
code
90117670/cell_17
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv', parse_dates=True)
russian_personnel
russian_personnel = russian_personnel.drop('personnel*', axis=1)
russian_personnel
from sklearn.impute import SimpleImputer
russian_personnel_copy = russian_personnel.drop('date', axis=1)
my_imputer = SimpleImputer()
imputed_data = pd.DataFrame(my_imputer.fit_transform(russian_personnel_copy))
imputed_data.columns = russian_personnel_copy.columns
imputed_data
russian_personnel_copy = russian_personnel.fillna(0)
russian_personnel_copy
code
90117670/cell_14
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv', parse_dates=True)
russian_personnel
russian_personnel = russian_personnel.drop('personnel*', axis=1)
russian_personnel
code
90117670/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)  # equipment losses, as loaded in the other cells
russian_equipment_no_day = russian_equipment.drop('day', axis=1)
plt.figure(figsize=(20, 9))
x_data = ['military auto', 'APC']
for vehicle in x_data:
    sns.lmplot(data=russian_equipment_no_day, x=vehicle, y='tank')
plt.show()
code
90117670/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv', parse_dates=True)
russian_personnel
code
2032867/cell_13
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.isnull().any()
hrdunique = HRData.nunique()
hrdunique = hrdunique.sort_values()
hrdunique
hrd = HRData.copy()
hrd.drop('Over18', axis=1, inplace=True)
hrd.drop('StandardHours', axis=1, inplace=True)
hrd.drop('EmployeeNumber', axis=1, inplace=True)
hrd.drop('EmployeeCount', axis=1, inplace=True)
hrd.groupby('Attrition').mean()
df = pd.DataFrame(hrd, columns=['Gender', 'Department', 'Attrition'])
groupby_DeptnAttrition = df['Gender'].groupby([df['Department'], df['Attrition']])
groupby_DeptnAttrition.describe()
code
2032867/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.head()
code
2032867/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.isnull().any()
hrdunique = HRData.nunique()
hrdunique = hrdunique.sort_values()
hrdunique
hrd = HRData.copy()
hrd.drop('Over18', axis=1, inplace=True)
hrd.drop('StandardHours', axis=1, inplace=True)
hrd.drop('EmployeeNumber', axis=1, inplace=True)
hrd.drop('EmployeeCount', axis=1, inplace=True)
hrd.groupby('Attrition').mean()
df = pd.DataFrame(hrd, columns=['Gender', 'Department', 'Attrition'])
groupby_DeptnAttrition = df['Gender'].groupby([df['Department'], df['Attrition']])
num_bins = 30
sns.distplot(hrd['Age'])
code