path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses 1 value
32068059/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import CountVectorizer from tqdm import tqdm import gensim import json import nltk import numpy as np import os import pandas as pd import pickle import re import spacy import warnings import os import pandas as pd pd.set_option('max_colwidth', 1000) pd.set_option('max_rows', 100) import numpy as np import pickle import matplotlib.pyplot as plt from datetime import datetime import re import json from tqdm import tqdm import textwrap import importlib as imp from scipy.spatial.distance import cdist from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer import nltk nltk.download('stopwords') import scispacy import spacy from sklearn.feature_extraction.text import CountVectorizer import gensim from gensim import corpora, models from gensim.models.coherencemodel import CoherenceModel import fasttext import pyLDAvis import pyLDAvis.gensim from wordcloud import WordCloud from IPython.display import display import ipywidgets as widgets import warnings warnings.filterwarnings('ignore') os.getcwd() source_column = 'text' id_colname = 'cord_uid' split_sentence_by = '(?<=\\.) ?(?![0-9a-z])' cov_earliest_date = '2019-12-01' cov_key_terms = ['covid\\W19', 'covid19', 'covid', '2019\\Wncov', '2019ncov', 'ncov\\W2019', 'sars\\Wcov\\W2', 'sars\\Wcov2', '新型冠状病毒'] cov_related_terms = '(novel|new)( beta| )coronavirus' input_data_path = '/kaggle/input/CORD-19-research-challenge/' working_data_path = '/kaggle/input/cov19-pickles/' metadata = pd.read_csv(input_data_path + 'metadata.csv', encoding='utf-8').replace({np.nan: None}) metadata.isnull().sum(axis=0) def pdf_or_pmc(r): if r.has_pdf_parse: return 'pdf_json' if r.has_pmc_xml_parse: return 'pmc_json' return '' metadata['sha_arr'] = metadata.apply(lambda r: r.sha.split(';') if r.sha is not None else [], axis=1) metadata['full_text_file_path'] = metadata.apply(lambda r: np.unique(['/'.join([r.full_text_file, r.full_text_file, pdf_or_pmc(r), sha.strip()]) if r.has_pdf_parse or r.has_pmc_xml_parse else '' for sha in r.sha_arr]) if len(r.sha_arr) > 0 else [], axis=1) metadata['publish_time'] = metadata['publish_time'].str.replace(' ([a-zA-Z]{3}-[a-zA-Z]{3})|(Spring)|(Summer)|(Autumn)|(Fall)|(Winter)', '', regex=True).str.strip() metadata['publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y-%m-%d', errors='coerce') mask = metadata.publish_time_.isnull() metadata.loc[mask, 'publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y %B', errors='coerce') mask = metadata.publish_time_.isnull() metadata.loc[mask, 'publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y %b', errors='coerce') mask = metadata.publish_time_.isnull() metadata.loc[mask, 'publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y %B %d', errors='coerce') mask = metadata.publish_time_.isnull() metadata.loc[mask, 'publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y %b %d', errors='coerce') mask = metadata.publish_time_.isnull() metadata.loc[mask, 'publish_time_'] = pd.to_datetime(metadata.publish_time, format='%Y', errors='coerce') mask = metadata.publish_time_.isnull() invalid_dates = metadata.loc[mask, :].shape[0] metadata.publish_time = metadata.publish_time_ metadata.drop(['publish_time_'], inplace=True, axis=1) mask = metadata['full_text_file_path'].apply(lambda r: len(r) > 1) def get_paper_info(json_data): return ' '.join([t['text'] for t in json_data['body_text']]) full_text = [] for r in tqdm(metadata.to_dict(orient='records')): record = [] for p in r['full_text_file_path']: with open(input_data_path + p + '.json', 'r', encoding='utf-8') as f: data = json.load(f) record.append(get_paper_info(data)) full_text_ = '\n'.join(np.unique(record)) if len(record) > 0 else None full_text.append(full_text_) metadata['full_text'] = full_text meta_full_text = metadata meta_full_text[source_column] = np.where(meta_full_text['full_text'].isnull(), meta_full_text['abstract'], meta_full_text['full_text']) meta_full_text = meta_full_text.dropna(subset=[source_column]).reset_index(drop=True) meta_full_text = meta_full_text.sort_values('publish_time', ascending=False).drop_duplicates(source_column) meta_full_text = meta_full_text.sort_values('publish_time', ascending=False).drop_duplicates(id_colname) meta_full_text.drop(['sha', 'pmcid', 'pubmed_id', 'Microsoft Academic Paper ID', 'has_pdf_parse', 'has_pmc_xml_parse', 'full_text_file', 'sha_arr', 'full_text_file_path', 'full_text'], inplace=True, axis=1) pickle.dump(meta_full_text, open(working_data_path + 'all_papers.pkl', 'wb')) meta_full_text = pickle.load(open(working_data_path + 'all_papers.pkl', 'rb')) corpus = meta_full_text[source_column] stop_words = stopwords.words('english') cord_stopwords = ['doi', 'preprint', 'copyright', 'peer', 'reviewed', 'org', 'https', 'et', 'al', 'author', 'figure', 'rights', 'reserved', 'permission', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'Elsevier', 'PMC', 'CZI', '-PRON-', 'abstract'] for word in tqdm(cord_stopwords): if word not in stop_words: stop_words.append(word) else: continue nlp_lg = spacy.load('en_core_sci_lg', disable=['tagger', 'parser', 'ner']) nlp_lg.max_length = 2000000 for w in tqdm(stop_words): nlp_lg.vocab[w].is_stop = True def removeParenthesesNumbers(v): char_list_rm = ['[(]', '[)]', '[′·]'] char_list_rm_spc = [' no[nt]-', ' non', ' low-', ' high-'] v = re.sub('|'.join(char_list_rm), '', v) v = re.sub('|'.join(char_list_rm_spc), ' ', v) return v sentence_test = '($2196.8)/case (in)fidelity μg μg/ml a=b2 www.website.org α-gal 2-len a.' def spacy_tokenizer(sentence): sentence = removeParenthesesNumbers(sentence) token_rm = ['(www.\\S+)', '(-[1-9.])', '([∼≈≥≤≦⩾⩽→μ]\\S+)', '(\\S+=\\S+)', '(http\\S+)'] tokenized_list = [word.lemma_ for word in nlp_lg(sentence) if not (word.like_num or word.is_stop or word.is_punct or word.is_space)] tokenized_list = [word for word in tokenized_list if not re.search('|'.join(token_rm), word)] tokenized_list = [word for word in tokenized_list if len(re.findall('[a-zA-Z]', word)) > 1] tokenized_list = [word for word in tokenized_list if re.search('^[a-zA-Z0-9]', word)] return tokenized_list spacy_tokenizer(sentence_test) vec = CountVectorizer(max_df=0.8, min_df=0.001, tokenizer=spacy_tokenizer) X = vec.fit_transform(tqdm(corpus)) valid_tokens = vec.get_feature_names() X = pickle.load(open(working_data_path + 'TM_X.pkl', 'rb')) valid_tokens = pickle.load(open(working_data_path + 'TM_valid_tokens.pkl', 'rb')) arr = X.toarray() texts = [] for i in tqdm(range(arr.shape[0])): text = [] for j in range(arr.shape[1]): occurrence = arr[i, j] if occurrence > 0: text.extend([valid_tokens[j]] * occurrence) texts.append(text) texts = pickle.load(open(working_data_path + 'TM_texts.pkl', 'rb')) np.random.seed(1) dictionary = gensim.corpora.Dictionary(texts) count = 0 for k, v in dictionary.iteritems(): print(k, v) count += 1 if count > 10: break
code
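The cell above ends right after building the gensim dictionary. A minimal sketch of the presumable next step (not captured in this record; `texts` and `dictionary` are the objects defined above, and every parameter value here is illustrative):

import gensim
from gensim.models.coherencemodel import CoherenceModel

dictionary.filter_extremes(no_below=5, no_above=0.5)  # prune very rare and very common tokens
bow_corpus = [dictionary.doc2bow(text) for text in texts]
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=10, id2word=dictionary, passes=2, workers=2)
# Topic coherence gives a rough quality signal for the chosen topic count.
print(CoherenceModel(model=lda_model, texts=texts, dictionary=dictionary, coherence='c_v').get_coherence())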
32068059/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from nltk.corpus import stopwords from tqdm import tqdm import spacy stop_words = stopwords.words('english') cord_stopwords = ['doi', 'preprint', 'copyright', 'peer', 'reviewed', 'org', 'https', 'et', 'al', 'author', 'figure', 'rights', 'reserved', 'permission', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'Elsevier', 'PMC', 'CZI', '-PRON-', 'abstract'] for word in tqdm(cord_stopwords): if word not in stop_words: stop_words.append(word) else: continue nlp_lg = spacy.load('en_core_sci_lg', disable=['tagger', 'parser', 'ner']) nlp_lg.max_length = 2000000 for w in tqdm(stop_words): nlp_lg.vocab[w].is_stop = True
code
32068059/cell_17
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from tqdm import tqdm stop_words = stopwords.words('english') cord_stopwords = ['doi', 'preprint', 'copyright', 'peer', 'reviewed', 'org', 'https', 'et', 'al', 'author', 'figure', 'rights', 'reserved', 'permission', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'Elsevier', 'PMC', 'CZI', '-PRON-', 'abstract'] for word in tqdm(cord_stopwords): if word not in stop_words: stop_words.append(word) else: continue
code
32068059/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords from tqdm import tqdm import re import spacy stop_words = stopwords.words('english') cord_stopwords = ['doi', 'preprint', 'copyright', 'peer', 'reviewed', 'org', 'https', 'et', 'al', 'author', 'figure', 'rights', 'reserved', 'permission', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'Elsevier', 'PMC', 'CZI', '-PRON-', 'abstract'] for word in tqdm(cord_stopwords): if word not in stop_words: stop_words.append(word) else: continue nlp_lg = spacy.load('en_core_sci_lg', disable=['tagger', 'parser', 'ner']) nlp_lg.max_length = 2000000 for w in tqdm(stop_words): nlp_lg.vocab[w].is_stop = True def removeParenthesesNumbers(v): char_list_rm = ['[(]', '[)]', '[′·]'] char_list_rm_spc = [' no[nt]-', ' non', ' low-', ' high-'] v = re.sub('|'.join(char_list_rm), '', v) v = re.sub('|'.join(char_list_rm_spc), ' ', v) return v sentence_test = '($2196.8)/case (in)fidelity μg μg/ml a=b2 www.website.org α-gal 2-len a.' def spacy_tokenizer(sentence): sentence = removeParenthesesNumbers(sentence) token_rm = ['(www.\\S+)', '(-[1-9.])', '([∼≈≥≤≦⩾⩽→μ]\\S+)', '(\\S+=\\S+)', '(http\\S+)'] tokenized_list = [word.lemma_ for word in nlp_lg(sentence) if not (word.like_num or word.is_stop or word.is_punct or word.is_space)] tokenized_list = [word for word in tokenized_list if not re.search('|'.join(token_rm), word)] tokenized_list = [word for word in tokenized_list if len(re.findall('[a-zA-Z]', word)) > 1] tokenized_list = [word for word in tokenized_list if re.search('^[a-zA-Z0-9]', word)] return tokenized_list spacy_tokenizer(sentence_test)
code
32068059/cell_5
[ "image_output_1.png" ]
import nltk import numpy as np import os import pandas as pd import warnings import os import pandas as pd pd.set_option('max_colwidth', 1000) pd.set_option('max_rows', 100) import numpy as np np.set_printoptions(threshold=10000) import pickle import matplotlib.pyplot as plt from datetime import datetime import re import json from tqdm import tqdm import textwrap import importlib as imp from scipy.spatial.distance import cdist from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer import nltk nltk.download('stopwords') import scispacy import spacy from sklearn.feature_extraction.text import CountVectorizer import gensim from gensim import corpora, models from gensim.models.coherencemodel import CoherenceModel import fasttext import pyLDAvis import pyLDAvis.gensim from wordcloud import WordCloud from IPython.display import display import ipywidgets as widgets import warnings warnings.filterwarnings('ignore') os.getcwd()
code
72092559/cell_4
[ "text_plain_output_1.png" ]
def a(): print('a() starts') b() d() print('a() returns') def b(): print('b() starts') c() print('b() returns') def c(): print('c() starts') print('c() returns') def d(): print('d() starts') print('d() returns') a()
code
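For reference, tracing the nested calls above gives this expected output:

a() starts
b() starts
c() starts
c() returns
b() returns
d() starts
d() returns
a() returns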
72092559/cell_2
[ "text_plain_output_1.png" ]
for i in range(1, 10): print(i)
code
72092559/cell_3
[ "text_plain_output_1.png" ]
head = 0 tail = 0 for i in range(1): ran = 0 if ran == 1: head = head + 1 elif ran == 2: tail = tail + 1 else: print('error') print(str(head) + ' vs ' + str(tail))
code
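As written, cell_3 always prints 'error' and then '0 vs 0': ran is fixed at 0 and the loop body runs only once. A corrected sketch of the apparent intent (coin flips via the random module is an assumption):

import random

head = 0
tail = 0
for i in range(100):  # flip repeatedly instead of once
    ran = random.randint(1, 2)  # 1 counts as head, 2 as tail
    if ran == 1:
        head = head + 1
    elif ran == 2:
        tail = tail + 1
    else:
        print('error')
print(str(head) + ' vs ' + str(tail))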
72092559/cell_5
[ "text_plain_output_1.png" ]
perc = 0.1 def plustip(total): return total * perc + total totalwtip = plustip(12.0) print(totalwtip) print(perc)
code
2013234/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') f, ax = plt.subplots(1, 3, figsize=(15, 6)) ax[0].imshow(test.iloc[0].values.reshape(28, 28), cmap='binary') ax[1].imshow(test.iloc[1].values.reshape(28, 28), cmap='binary') ax[2].imshow(test.iloc[2].values.reshape(28, 28), cmap='binary') np.array([np.array([int(i == label) for i in range(10)]) for label in [5, 2, 3, 9]]) labels_encoded = np.array([np.array([int(i == label) for i in range(10)]) for label in df.iloc[:, 0].values]) dataset = df.drop('label', axis=1) dataset = np.multiply(dataset.values.astype(np.float32), 1.0 / 255.0) test = np.multiply(test.values.astype(np.float32), 1.0 / 255.0) (dataset.shape, labels_encoded.shape)
code
2013234/cell_9
[ "text_plain_output_1.png" ]
import numpy as np np.array([np.array([int(i == label) for i in range(10)]) for label in [5, 2, 3, 9]])
code
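The nested comprehension above builds one row of a 10-class one-hot matrix per label; an equivalent vectorized form indexes an identity matrix:

import numpy as np

labels = np.array([5, 2, 3, 9])
one_hot = np.eye(10, dtype=int)[labels]  # row i of eye(10) is the one-hot vector for class i
print(one_hot)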
2013234/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') df.describe()
code
2013234/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd df = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') f, ax = plt.subplots(1, 3, figsize=(15, 6)) ax[0].imshow(test.iloc[0].values.reshape(28, 28), cmap='binary') ax[1].imshow(test.iloc[1].values.reshape(28, 28), cmap='binary') ax[2].imshow(test.iloc[2].values.reshape(28, 28), cmap='binary')
code
2013234/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') f, ax = plt.subplots(1, 3, figsize=(15, 6)) ax[0].imshow(test.iloc[0].values.reshape(28, 28), cmap='binary') ax[1].imshow(test.iloc[1].values.reshape(28, 28), cmap='binary') ax[2].imshow(test.iloc[2].values.reshape(28, 28), cmap='binary') np.array([np.array([int(i == label) for i in range(10)]) for label in [5, 2, 3, 9]]) labels_encoded = np.array([np.array([int(i == label) for i in range(10)]) for label in df.iloc[:, 0].values]) dataset = df.drop('label', axis=1) dataset = np.multiply(dataset.values.astype(np.float32), 1.0 / 255.0) test = np.multiply(test.values.astype(np.float32), 1.0 / 255.0) (dataset.shape, labels_encoded.shape) train_size = 40000 validation_size = 2000 train = dataset[:train_size] train_targets = labels_encoded[:train_size] validation = dataset[train_size:] validation_targets = labels_encoded[train_size:] (train.shape, train_targets.shape, validation.shape, validation_targets.shape, test.shape)
code
129026593/cell_9
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator BASE_PATH = '/kaggle/input/histopathologic-cancer-detection' BASE_TRAIN_PATH = f'{BASE_PATH}/train' BASE_TEST_PATH = f'{BASE_PATH}/test' BASE_TRAIN_LABELS_PATH = '/kaggle/input/dataset-copy/new_dataset/train_labels.csv' BASE_TEST_TRAIN_PATH = f'/kaggle/input/dataset-copy/new_dataset/train' BASE_TEST_TRAIN_10000_PATH = f'{BASE_TEST_TRAIN_PATH}/10000' BASE_TEST_TRAIN_50000_PATH = f'{BASE_TEST_TRAIN_PATH}/150000' BASE_TEST_TRAIN_ALL_PATH = f'{BASE_TEST_TRAIN_PATH}/all' SYMBOLINK_PATH = '/kaggle/working' SYMBOLINK_TRAIN_PATH = f'{SYMBOLINK_PATH}/train' SYMBOLINK_SMALLER_TRAIN_PATH = f'{SYMBOLINK_PATH}/smaller_train' from tensorflow.keras.preprocessing import image_dataset_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator datagen = ImageDataGenerator(rescale=1.0 / 255) train_gen = datagen.flow_from_directory(BASE_TEST_TRAIN_10000_PATH, target_size=(96, 96), batch_size=32, class_mode='binary')
code
129026593/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation from tensorflow.keras.models import Sequential from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
code
129026593/cell_10
[ "text_html_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator BASE_PATH = '/kaggle/input/histopathologic-cancer-detection' BASE_TRAIN_PATH = f'{BASE_PATH}/train' BASE_TEST_PATH = f'{BASE_PATH}/test' BASE_TRAIN_LABELS_PATH = '/kaggle/input/dataset-copy/new_dataset/train_labels.csv' BASE_TEST_TRAIN_PATH = f'/kaggle/input/dataset-copy/new_dataset/train' BASE_TEST_TRAIN_10000_PATH = f'{BASE_TEST_TRAIN_PATH}/10000' BASE_TEST_TRAIN_50000_PATH = f'{BASE_TEST_TRAIN_PATH}/150000' BASE_TEST_TRAIN_ALL_PATH = f'{BASE_TEST_TRAIN_PATH}/all' SYMBOLINK_PATH = '/kaggle/working' SYMBOLINK_TRAIN_PATH = f'{SYMBOLINK_PATH}/train' SYMBOLINK_SMALLER_TRAIN_PATH = f'{SYMBOLINK_PATH}/smaller_train' from tensorflow.keras.preprocessing import image_dataset_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator datagen = ImageDataGenerator(rescale=1.0 / 255) train_gen = datagen.flow_from_directory(BASE_TEST_TRAIN_10000_PATH, target_size=(96, 96), batch_size=32, class_mode='binary') print(train_gen.samples)
code
129026593/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation from tensorflow.keras.models import Sequential model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(Flatten()) model.add(Dense(1024, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(2, activation='softmax')) model.summary()
code
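Note that cell_12 declares input_shape=(32, 32, 3) and a 2-unit softmax head, while the generator in cell_9 yields 96x96 images with class_mode='binary'. A sketch of a configuration consistent with that generator (an assumption about the intended fix, not the author's code):

from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(96, 96, 3)))  # match target_size=(96, 96)
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))  # one sigmoid unit pairs with class_mode='binary'
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])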
129026593/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd BASE_PATH = '/kaggle/input/histopathologic-cancer-detection' BASE_TRAIN_PATH = f'{BASE_PATH}/train' BASE_TEST_PATH = f'{BASE_PATH}/test' BASE_TRAIN_LABELS_PATH = '/kaggle/input/dataset-copy/new_dataset/train_labels.csv' BASE_TEST_TRAIN_PATH = f'/kaggle/input/dataset-copy/new_dataset/train' BASE_TEST_TRAIN_10000_PATH = f'{BASE_TEST_TRAIN_PATH}/10000' BASE_TEST_TRAIN_50000_PATH = f'{BASE_TEST_TRAIN_PATH}/150000' BASE_TEST_TRAIN_ALL_PATH = f'{BASE_TEST_TRAIN_PATH}/all' SYMBOLINK_PATH = '/kaggle/working' SYMBOLINK_TRAIN_PATH = f'{SYMBOLINK_PATH}/train' SYMBOLINK_SMALLER_TRAIN_PATH = f'{SYMBOLINK_PATH}/smaller_train' train_labels_df = pd.read_csv(BASE_TRAIN_LABELS_PATH) train_labels_df.set_index('id', inplace=True) train_labels_df.head()
code
74052188/cell_4
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train)
code
74052188/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.linear_model import LinearRegression from sklearn.preprocessing import OneHotEncoder import numpy as np import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset = pd.read_csv('../input/abalone-dataset/abalone.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(drop='first'), [0])], remainder='passthrough', sparse_threshold=0) X = np.array(ct.fit_transform(X)) from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train) y_predict = regressor.predict(X_train) np.set_printoptions(precision=2) print(np.concatenate((y_predict.reshape(len(y_predict), 1), y_test.reshape(len(y_predict), 1)), 1))
code
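cell_5's stderr output is consistent with X_train, y_train and y_test never being defined in the recorded cells. A plausible missing step (a sketch assuming an 80/20 split; X and y are the abalone arrays built above):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)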
18137750/cell_9
[ "text_html_output_1.png" ]
from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential import cv2 import numpy as np import os import pandas as pd import sys import numpy as np import pandas as pd import cv2 import os import sys test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv') test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png') diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative'] num_classes = 5 sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/')) from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential def create_effnetB5_model(input_shape, n_out): base_model = EfficientNetB5(weights=None, include_top=False, input_shape=input_shape) model = Sequential() model.add(base_model) model.add(Dropout(0.25)) model.add(Dense(1024)) model.add(LeakyReLU()) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(n_out, activation='sigmoid')) return model IMAGE_HEIGHT = 340 IMAGE_WIDTH = 340 PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_best_qwk.h5' model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes) model.load_weights(PRETRAINED_MODEL) model.summary() from tqdm import tqdm_notebook as tqdm submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv') predicted = [] print('Making predictions...') for i, name in tqdm(enumerate(submit['id_code'])): path = os.path.join('../input/aptos2019-blindness-detection/test_images/', name + '.png') image = cv2.imread(path) image = cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH)) X = np.array(image[np.newaxis] / 255) raw_prediction = model.predict(X) > 0.5 prediction = raw_prediction.astype(int).sum(axis=1) - 1 predicted.append(prediction[0])
code
18137750/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential import os import sys import numpy as np import pandas as pd import cv2 import os import sys sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/')) from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential def create_effnetB5_model(input_shape, n_out): base_model = EfficientNetB5(weights=None, include_top=False, input_shape=input_shape) model = Sequential() model.add(base_model) model.add(Dropout(0.25)) model.add(Dense(1024)) model.add(LeakyReLU()) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(n_out, activation='sigmoid')) return model
code
18137750/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import numpy as np import pandas as pd import cv2 import os import sys print(os.listdir('../input'))
code
18137750/cell_11
[ "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential import cv2 import numpy as np import os import pandas as pd import sys import numpy as np import pandas as pd import cv2 import os import sys test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv') test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png') diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative'] num_classes = 5 sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/')) from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential def create_effnetB5_model(input_shape, n_out): base_model = EfficientNetB5(weights=None, include_top=False, input_shape=input_shape) model = Sequential() model.add(base_model) model.add(Dropout(0.25)) model.add(Dense(1024)) model.add(LeakyReLU()) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(n_out, activation='sigmoid')) return model IMAGE_HEIGHT = 340 IMAGE_WIDTH = 340 PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_best_qwk.h5' model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes) model.load_weights(PRETRAINED_MODEL) model.summary() from tqdm import tqdm_notebook as tqdm submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv') predicted = [] for i, name in tqdm(enumerate(submit['id_code'])): path = os.path.join('../input/aptos2019-blindness-detection/test_images/', name + '.png') image = cv2.imread(path) image = cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH)) X = np.array(image[np.newaxis] / 255) raw_prediction = model.predict(X) > 0.5 prediction = raw_prediction.astype(int).sum(axis=1) - 1 predicted.append(prediction[0]) submit['diagnosis'] = predicted submit.to_csv('submission.csv', index=False) submit.head(10)
code
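The decoding line raw_prediction.astype(int).sum(axis=1) - 1 treats the five sigmoid outputs as cumulative ordinal labels: a grade-k image is expected to activate outputs 0 through k, so counting activations and subtracting one recovers the grade. A toy illustration (the probabilities are invented):

import numpy as np

raw_prediction = np.array([[0.9, 0.8, 0.7, 0.2, 0.1]]) > 0.5  # hypothetical sigmoid outputs
print(raw_prediction.astype(int).sum(axis=1) - 1)  # -> [2], i.e. 'Moderate' in diag_text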
18137750/cell_7
[ "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential import os import pandas as pd import sys import numpy as np import pandas as pd import cv2 import os import sys test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv') test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png') diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative'] num_classes = 5 sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/')) from efficientnet import EfficientNetB5 from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU from keras.models import Model, Sequential def create_effnetB5_model(input_shape, n_out): base_model = EfficientNetB5(weights=None, include_top=False, input_shape=input_shape) model = Sequential() model.add(base_model) model.add(Dropout(0.25)) model.add(Dense(1024)) model.add(LeakyReLU()) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(n_out, activation='sigmoid')) return model IMAGE_HEIGHT = 340 IMAGE_WIDTH = 340 PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_best_qwk.h5' print('Creating model...') model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes) print('Restoring model from ' + PRETRAINED_MODEL + '...') model.load_weights(PRETRAINED_MODEL) model.summary()
code
130025106/cell_42
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(X_train, y_train)
code
130025106/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum() train.drop_duplicates() train.info()
code
130025106/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train test
code
130025106/cell_56
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train test.drop(columns=['id'], inplace=True) test test
code
130025106/cell_30
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.svm import SVC svm = SVC() svm.fit(X_train, y_train)
code
130025106/cell_33
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.svm import SVC X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.svm import SVC ksvm = SVC(kernel='rbf') ksvm.fit(X_train, y_train)
code
130025106/cell_39
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(X_train, y_train)
code
130025106/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train
code
130025106/cell_52
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train test.drop(columns=['id'], inplace=True) test test
code
130025106/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
130025106/cell_45
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(X_train, y_train)
code
130025106/cell_49
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, y_train) y_pred_lr = lr.predict(X_test) from sklearn.svm import SVC svm = SVC() svm.fit(X_train, y_train) y_pred_svm = svm.predict(X_test) from sklearn.svm import SVC ksvm = SVC(kernel='rbf') ksvm.fit(X_train, y_train) y_pred_ksvm = ksvm.predict(X_test) from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train) y_pred_gnb = gnb.predict(X_test) from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(X_train, y_train) y_pred_knn = knn.predict(X_test) from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(X_train, y_train) y_pred_rfc = rfc.predict(X_test) from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(X_train, y_train) y_pred_dtc = dtc.predict(X_test) df = pd.DataFrame({'Model Name': ['Logistic Regression', 'Linear SVM', 'Kernel SVM', 'Naive Bayes', 'K Nearest Neighbors', 'Decision Tree Classifier', 'Random Forest Classifier'], 'Accuracy Score': [accuracy_score(y_test, y_pred_lr), accuracy_score(y_test, y_pred_svm), accuracy_score(y_test, y_pred_ksvm), accuracy_score(y_test, y_pred_gnb), accuracy_score(y_test, y_pred_knn), accuracy_score(y_test, y_pred_dtc), accuracy_score(y_test, y_pred_rfc)]}) df = df.sort_values(by=['Accuracy Score'], ascending=False) df
code
130025106/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum() train.drop_duplicates() plt.boxplot(train) plt.show()
code
130025106/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum()
code
130025106/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum() train.drop_duplicates() plt.figure(figsize=(18, 12)) sns.heatmap(train.corr(), annot=True, square=True, cmap='BrBG') plt.show()
code
130025106/cell_24
[ "image_output_1.png" ]
X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape)
code
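X_sample and y_sample are never defined in the recorded cells of notebook 130025106; given how they are indexed (features in every column but the last, price_range in the last), they are presumably the two halves of a split over train. A sketch of the assumed missing cell:

from sklearn.model_selection import train_test_split

# Assumption: X_sample is the training partition and y_sample the held-out
# partition of `train`, each still carrying the target as its last column.
X_sample, y_sample = train_test_split(train, test_size=0.2, random_state=42)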
130025106/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum() train.drop_duplicates() train.hist(bins=20, figsize=(18, 12)) plt.show()
code
130025106/cell_53
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train test.drop(columns=['id'], inplace=True) test X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) data_test = sc.transform(test) data_test
code
130025106/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train train.isnull().sum() train.drop_duplicates()
code
130025106/cell_27
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, y_train)
code
130025106/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') train test.drop(columns=['id'], inplace=True) test
code
130025106/cell_36
[ "text_html_output_1.png" ]
from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import StandardScaler X_train = X_sample.iloc[:, :-1].values y_train = X_sample.iloc[:, -1].values X_test = y_sample.iloc[:, :-1].values y_test = y_sample.iloc[:, -1].values (X_train.shape, X_test.shape, y_train.shape, y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train)
code
324276/cell_9
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title('Job Role Interest') plt.show()
code
324276/cell_6
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05, 1)) plt.title('Gender') plt.show()
code
324276/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import matplotlib.pyplot as plt import pandas as pd import colorsys plt.style.use('seaborn-talk') df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', sep=',')
code
324276/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt df.Age.hist(bins=100) plt.xlabel('Age') plt.title('Distribution of Age') plt.show()
code
324276/cell_12
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title("Job Role Interest") plt.show() N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.EmploymentField.value_counts().index patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.3, 1)) plt.title('Employment Field') plt.show()
code
2000572/cell_13
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import CountVectorizer import pandas as pd import string messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.groupby('class').describe() def process_text(text): """ What will be covered: 1. Remove punctuation 2. Remove stopwords 3. Return list of clean text words """ nopunc = [char for char in text if char not in string.punctuation] nopunc = ''.join(nopunc) clean_words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')] return clean_words bow_transformer = CountVectorizer(analyzer=process_text).fit(messages['text']) len(bow_transformer.vocabulary_)
code
2000572/cell_9
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords import pandas as pd import string messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.groupby('class').describe() def process_text(text): """ What will be covered: 1. Remove punctuation 2. Remove stopwords 3. Return list of clean text words """ nopunc = [char for char in text if char not in string.punctuation] nopunc = ''.join(nopunc) clean_words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')] return clean_words messages['text'].apply(process_text).head()
code
2000572/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.groupby('class').describe()
code
2000572/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.groupby('class').describe() messages.hist(column='length', by='class', bins=50, figsize=(15, 6))
code
2000572/cell_15
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import CountVectorizer import pandas as pd import string messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.groupby('class').describe() def process_text(text): """ What will be covered: 1. Remove punctuation 2. Remove stopwords 3. Return list of clean text words """ nopunc = [char for char in text if char not in string.punctuation] nopunc = ''.join(nopunc) clean_words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')] return clean_words bow_transformer = CountVectorizer(analyzer=process_text).fit(messages['text']) len(bow_transformer.vocabulary_) messages_bow = bow_transformer.transform(messages['text']) print('Sparse matrix shape ', messages_bow.shape) print('Amount of Non-Zero occurences: ', messages_bow.nnz)
code
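A small follow-up the printed shape and nnz make possible: the density of the bag-of-words matrix can be computed directly from those two numbers:

density = 100.0 * messages_bow.nnz / (messages_bow.shape[0] * messages_bow.shape[1])
print('Density: {:.2f}%'.format(density))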
2000572/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd messages = pd.read_csv('../input/spam.csv', encoding='latin-1') messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True) messages = messages.rename(columns={'v1': 'class', 'v2': 'text'}) messages.head()
code
1010505/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns null_columns = houses.columns[houses.isnull().any()] houses[null_columns].isnull().sum() sns.barplot(houses['TotRmsAbvGrd'], houses['SalePrice']) plt.title('Sale Price vs Number of rooms')
code
1010505/cell_2
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import warnings warnings.filterwarnings('ignore') import matplotlib.pyplot as plt import seaborn as sns sns.set(style='whitegrid', color_codes=True) sns.set(font_scale=1) houses = pd.read_csv('../input/train.csv') houses.head()
code
1010505/cell_3
[ "text_plain_output_1.png" ]
null_columns = houses.columns[houses.isnull().any()] houses[null_columns].isnull().sum()
code
128010152/cell_12
[ "text_plain_output_1.png" ]
from glob import glob import matplotlib.pyplot as plt import tensorflow as tf IMAGE_SIZE = 256 BATCH_SIZE = 16 MAX_TRAIN_IMAGES = 400 train_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/our485/low/*'))[:MAX_TRAIN_IMAGES] val_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/our485/low/*'))[MAX_TRAIN_IMAGES:] test_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/eval15/low/*')) def load_data(image_path): image = tf.io.read_file(image_path) image = tf.image.decode_png(image, channels=3) image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE]) image = image / 255.0 return image def data_generator(low_light_images): dataset = tf.data.Dataset.from_tensor_slices(low_light_images) dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) return dataset train_dataset = data_generator(train_low_light_images) val_dataset = data_generator(val_low_light_images) zero_dce_model = ZeroDCE() zero_dce_model.compile(learning_rate=0.0001) history = zero_dce_model.fit(train_dataset, validation_data=val_dataset, epochs=100) def plot_result(item): plt.plot(history.history[item], label=item) plt.plot(history.history['val_' + item], label='val_' + item) plt.xlabel('Epochs') plt.ylabel(item) plt.title('Train and Validation {} Over Epochs'.format(item), fontsize=14) plt.legend() plt.grid() plt.show() plot_result('total_loss') plot_result('illumination_smoothness_loss') plot_result('spatial_constancy_loss') plot_result('color_constancy_loss') plot_result('exposure_loss')
code
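ZeroDCE() is not defined in any recorded cell of notebook 128010152; the surrounding code (the low-light loader, the four Zero-DCE losses in the history plots) matches the Keras Zero-DCE example, where the model wraps a small curve-estimation CNN. A minimal sketch of that DCE-Net, offered as an assumption rather than the author's exact code:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def build_dce_net():
    # Seven 32-filter conv layers with symmetric skip connections; the output
    # holds 24 channels = 8 enhancement iterations x 3 RGB curve maps.
    input_img = keras.Input(shape=[None, None, 3])
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
    conv2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    conv3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv2)
    conv4 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv3)
    conv5 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(layers.Concatenate(axis=-1)([conv4, conv3]))
    conv6 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(layers.Concatenate(axis=-1)([conv5, conv2]))
    x_r = layers.Conv2D(24, (3, 3), activation='tanh', padding='same')(layers.Concatenate(axis=-1)([conv6, conv1]))
    return keras.Model(inputs=input_img, outputs=x_r)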
128010152/cell_5
[ "image_output_11.png", "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "image_output_14.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_16.png", "text_plain_output_8.png", "image_output_6.png", "image_output_12.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_15.png", "image_output_9.png" ]
from glob import glob import tensorflow as tf IMAGE_SIZE = 256 BATCH_SIZE = 16 MAX_TRAIN_IMAGES = 400 train_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/our485/low/*'))[:MAX_TRAIN_IMAGES] val_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/our485/low/*'))[MAX_TRAIN_IMAGES:] test_low_light_images = sorted(glob('/kaggle/input/lol-dataset/lol_dataset/eval15/low/*')) def load_data(image_path): image = tf.io.read_file(image_path) image = tf.image.decode_png(image, channels=3) image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE]) image = image / 255.0 return image def data_generator(low_light_images): dataset = tf.data.Dataset.from_tensor_slices(low_light_images) dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) return dataset train_dataset = data_generator(train_low_light_images) val_dataset = data_generator(val_low_light_images) print('Train Dataset:', train_dataset) print('Validation Dataset:', val_dataset)
code
122256403/cell_4
[ "text_html_output_1.png" ]
# check CUDA version !nvcc --version
code
122256403/cell_34
[ "text_plain_output_1.png" ]
from spacy.tokens import DocBin from spacy.util import filter_spans from tqdm import tqdm import fr_core_news_sm import json import os import os import pandas as pd import pandas as pd import re import re import spacy spacy.require_gpu() import re def trim_entity_spans(data: list) -> list: """Removes leading and trailing white spaces from entity spans. Args: data (list): The data to be cleaned in spaCy JSON format. Returns: list: The cleaned data. """ invalid_span_tokens = re.compile('\\s') cleaned_data = [] for text, annotations in data: entities = annotations['labels'] valid_entities = [] for start, end, label in entities: valid_start = start valid_end = end while valid_start < len(text) and invalid_span_tokens.match(text[valid_start]): valid_start += 1 while valid_end > 1 and invalid_span_tokens.match(text[valid_end - 1]): valid_end -= 1 valid_entities.append([valid_start, valid_end, label]) cleaned_data.append([text, {'labels': valid_entities}]) return cleaned_data def starts_with_punctuation(s): if re.match('^\\W', s): return True else: return False def ends_with_punctuation(s): if re.search('\\W$', s): return True else: return False def dp(train_data): list_train_data = [] for i, ele in enumerate(train_data): list_train_data.append((train_data[i]['id'], train_data[i]['text'], {'labels': train_data[i]['label']})) ent_prob = [] for i, ele in enumerate(list_train_data): ents = [] for j, elem in enumerate(list_train_data[i][2]['labels']): start = elem[0] end = elem[1] ent = list_train_data[i][1][start:end] if starts_with_punctuation(ent) and ends_with_punctuation(ent): ents.append((list_train_data[i][0], ent, start, end, 'YES', 'YES')) elif starts_with_punctuation(ent) and (not ends_with_punctuation(ent)): ents.append((list_train_data[i][0], ent, start, end, 'YES', 'NO')) elif not starts_with_punctuation(ent) and ends_with_punctuation(ent): ents.append((list_train_data[i][0], ent, start, end, 'NO', 'YES')) else: ents.append((list_train_data[i][0], ent, start, end, 'NO', 'NO')) for i, ent in enumerate(ents): if ent[4] == 'YES' or ent[5] == 'YES': ent_prob.append(ent) train_data_df = pd.DataFrame(train_data) ent_prob_df = pd.DataFrame(ent_prob, columns=['id', 'text', 'start', 'end', 'punBeg', 'punEnd']) merged = pd.merge(train_data_df, ent_prob_df, on='id') common_index = merged.index train_data_df = train_data_df.drop(index=common_index) for index, row in merged.iterrows(): if row['punBeg'] == 'YES': for item in row['label']: if int(item[0]) == int(row['start']): item[0] = item[0] + 1 if row['punEnd'] == 'YES': for item in row['label']: if int(item[1]) == int(row['end']): item[1] = item[1] - 1 for index, row in merged.iterrows(): new_row = {'id': row['id'], 'text': row['text_x'], 'label': row['label'], 'Comments': row['Comments']} train_data_df = train_data_df.append(new_row, ignore_index=True) dict_list = train_data_df.to_dict(orient='records') list_train_data = [] for i, ele in enumerate(dict_list): list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']})) list_train_data = [] for i, ele in enumerate(dict_list): list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']})) for i, ele in enumerate(list_train_data): entities = [] for label in list_train_data[i][1]['labels']: tuple_ = (label[0], label[1], label[2]) entities.append(tuple_) list_train_data[i][1]['labels'] = entities return list_train_data import pandas as pd import json import os os.chdir('/kaggle/input/d/fatimahabib1/niort-sentences') with open('annotations-niort-sentence-level.jsonl', 'r', encoding='utf-8') as f: s = f.read() data = json.loads(s) train_data = data['annotations'] list_train_data = [] for i, ele in enumerate(train_data): list_train_data.append((train_data[i]['text'], {'labels': train_data[i]['label']})) training_list = trim_entity_spans(list_train_data) ent_prob = [] ents = [] for i, ele in enumerate(training_list): for j, elem in enumerate(training_list[i][1]['labels']): start = elem[0] end = elem[1] ent = training_list[i][0][start:end] num_spaces_beginning = len(ent) - len(ent.lstrip()) num_spaces_end = len(ent) - len(ent.rstrip()) ents.append((training_list[i][0], ent, start, end, num_spaces_beginning, num_spaces_end)) for i, ent in enumerate(ents): if ent[1] != ent[1].strip(): ent_prob.append(ent) nlp = fr_core_news_sm.load() db = DocBin() for text, annot in tqdm(training_list): doc = nlp.make_doc(text) ents = [] for start, end, label in annot['labels']: span = doc.char_span(start, end, label=label, alignment_mode='contract') ents.append(span) pat_orig = len(ents) filtered = filter_spans(ents) pat_filt = len(filtered) doc.ents = filtered db.add(doc) db.to_disk('/kaggle/working/train.spacy') best_nlp = spacy.load('/kaggle/working/model-best') colors = {'INFO': '#F67DE3', 'SUB': '#7DF6D9'} options = {'colors': colors} doc = best_nlp('pour les constructions de moins de 90 m² de surface de plancher : 30 m² de surface de plancher supplémentaire par rapport à la surface de plancher existante à la date d’approbation du PLU. ') doc = best_nlp('La peinture sur les murs en pierre en taille ou en moellon est interdite. L’emploi à nu des matériaux destinés à être enduits est strictement interdit. ') spacy.displacy.render(doc, style='ent', options=options, jupyter=True)
code
122256403/cell_33
[ "text_plain_output_1.png" ]
from spacy.tokens import DocBin from spacy.util import filter_spans from tqdm import tqdm import fr_core_news_sm import json import os import os import pandas as pd import pandas as pd import re import re import spacy spacy.require_gpu() import re def trim_entity_spans(data: list) -> list: """Removes leading and trailing white spaces from entity spans. Args: data (list): The data to be cleaned in spaCy JSON format. Returns: list: The cleaned data. """ invalid_span_tokens = re.compile('\\s') cleaned_data = [] for text, annotations in data: entities = annotations['labels'] valid_entities = [] for start, end, label in entities: valid_start = start valid_end = end while valid_start < len(text) and invalid_span_tokens.match(text[valid_start]): valid_start += 1 while valid_end > 1 and invalid_span_tokens.match(text[valid_end - 1]): valid_end -= 1 valid_entities.append([valid_start, valid_end, label]) cleaned_data.append([text, {'labels': valid_entities}]) return cleaned_data def starts_with_punctuation(s): if re.match('^\\W', s): return True else: return False def ends_with_punctuation(s): if re.search('\\W$', s): return True else: return False def dp(train_data): list_train_data = [] for i, ele in enumerate(train_data): list_train_data.append((train_data[i]['id'], train_data[i]['text'], {'labels': train_data[i]['label']})) ent_prob = [] for i, ele in enumerate(list_train_data): ents = [] for j, elem in enumerate(list_train_data[i][2]['labels']): start = elem[0] end = elem[1] ent = list_train_data[i][1][start:end] if starts_with_punctuation(ent) and ends_with_punctuation(ent): ents.append((list_train_data[i][0], ent, start, end, 'YES', 'YES')) elif starts_with_punctuation(ent) and (not ends_with_punctuation(ent)): ents.append((list_train_data[i][0], ent, start, end, 'YES', 'NO')) elif not starts_with_punctuation(ent) and ends_with_punctuation(ent): ents.append((list_train_data[i][0], ent, start, end, 'NO', 'YES')) else: ents.append((list_train_data[i][0], ent, start, end, 'NO', 'NO')) for i, ent in enumerate(ents): if ent[4] == 'YES' or ent[5] == 'YES': ent_prob.append(ent) train_data_df = pd.DataFrame(train_data) ent_prob_df = pd.DataFrame(ent_prob, columns=['id', 'text', 'start', 'end', 'punBeg', 'punEnd']) merged = pd.merge(train_data_df, ent_prob_df, on='id') common_index = merged.index train_data_df = train_data_df.drop(index=common_index) for index, row in merged.iterrows(): if row['punBeg'] == 'YES': for item in row['label']: if int(item[0]) == int(row['start']): item[0] = item[0] + 1 if row['punEnd'] == 'YES': for item in row['label']: if int(item[1]) == int(row['end']): item[1] = item[1] - 1 for index, row in merged.iterrows(): new_row = {'id': row['id'], 'text': row['text_x'], 'label': row['label'], 'Comments': row['Comments']} train_data_df = train_data_df.append(new_row, ignore_index=True) dict_list = train_data_df.to_dict(orient='records') list_train_data = [] for i, ele in enumerate(dict_list): list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']})) list_train_data = [] for i, ele in enumerate(dict_list): list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']})) for i, ele in enumerate(list_train_data): entities = [] for label in list_train_data[i][1]['labels']: tuple_ = (label[0], label[1], label[2]) entities.append(tuple_) list_train_data[i][1]['labels'] = entities return list_train_data import pandas as pd import json import os os.chdir('/kaggle/input/d/fatimahabib1/niort-sentences') with open('annotations-niort-sentence-level.jsonl', 'r', encoding='utf-8') as f: s = f.read() data = json.loads(s) train_data = data['annotations'] list_train_data = [] for i, ele in enumerate(train_data): list_train_data.append((train_data[i]['text'], {'labels': train_data[i]['label']})) training_list = trim_entity_spans(list_train_data) ent_prob = [] ents = [] for i, ele in enumerate(training_list): for j, elem in enumerate(training_list[i][1]['labels']): start = elem[0] end = elem[1] ent = training_list[i][0][start:end] num_spaces_beginning = len(ent) - len(ent.lstrip()) num_spaces_end = len(ent) - len(ent.rstrip()) ents.append((training_list[i][0], ent, start, end, num_spaces_beginning, num_spaces_end)) for i, ent in enumerate(ents): if ent[1] != ent[1].strip(): ent_prob.append(ent) nlp = fr_core_news_sm.load() db = DocBin() for text, annot in tqdm(training_list): doc = nlp.make_doc(text) ents = [] for start, end, label in annot['labels']: span = doc.char_span(start, end, label=label, alignment_mode='contract') ents.append(span) pat_orig = len(ents) filtered = filter_spans(ents) pat_filt = len(filtered) doc.ents = filtered db.add(doc) db.to_disk('/kaggle/working/train.spacy') best_nlp = spacy.load('/kaggle/working/model-best') colors = {'INFO': '#F67DE3', 'SUB': '#7DF6D9'} options = {'colors': colors} doc = best_nlp('pour les constructions de moins de 90 m² de surface de plancher : 30 m² de surface de plancher supplémentaire par rapport à la surface de plancher existante à la date d’approbation du PLU. ') spacy.displacy.render(doc, style='ent', options=options, jupyter=True)
code
122256403/cell_29
[ "text_plain_output_1.png" ]
!python -m spacy debug data /kaggle/working/config.cfg --paths.train /kaggle/working/train.spacy --paths.dev /kaggle/working/train.spacy
code
122256403/cell_2
[ "text_plain_output_1.png" ]
!pip install spacy-transformers
code
122256403/cell_7
[ "text_html_output_1.png" ]
import spacy

spacy.require_gpu()
code
122256403/cell_3
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!python3 -m spacy download fr_core_news_sm
code
122256403/cell_35
[ "text_html_output_1.png" ]
from spacy.tokens import DocBin
from spacy.util import filter_spans
from tqdm import tqdm
import fr_core_news_sm
import json
import os
import pandas as pd
import re
import spacy

spacy.require_gpu()

def trim_entity_spans(data: list) -> list:
    """Removes leading and trailing white spaces from entity spans.

    Args:
        data (list): The data to be cleaned in spaCy JSON format.

    Returns:
        list: The cleaned data.
    """
    invalid_span_tokens = re.compile('\\s')
    cleaned_data = []
    for text, annotations in data:
        entities = annotations['labels']
        valid_entities = []
        for start, end, label in entities:
            valid_start = start
            valid_end = end
            while valid_start < len(text) and invalid_span_tokens.match(text[valid_start]):
                valid_start += 1
            while valid_end > 1 and invalid_span_tokens.match(text[valid_end - 1]):
                valid_end -= 1
            valid_entities.append([valid_start, valid_end, label])
        cleaned_data.append([text, {'labels': valid_entities}])
    return cleaned_data

def starts_with_punctuation(s):
    return bool(re.match('^\\W', s))

def ends_with_punctuation(s):
    return bool(re.search('\\W$', s))

def dp(train_data):
    list_train_data = []
    for i, ele in enumerate(train_data):
        list_train_data.append((train_data[i]['id'], train_data[i]['text'], {'labels': train_data[i]['label']}))
    ent_prob = []
    for i, ele in enumerate(list_train_data):
        ents = []
        for j, elem in enumerate(list_train_data[i][2]['labels']):
            start = elem[0]
            end = elem[1]
            ent = list_train_data[i][1][start:end]
            # Flag spans that start and/or end on a punctuation character
            begins = 'YES' if starts_with_punctuation(ent) else 'NO'
            ends = 'YES' if ends_with_punctuation(ent) else 'NO'
            ents.append((list_train_data[i][0], ent, start, end, begins, ends))
        for ent in ents:
            if ent[4] == 'YES' or ent[5] == 'YES':
                ent_prob.append(ent)
    train_data_df = pd.DataFrame(train_data)
    ent_prob_df = pd.DataFrame(ent_prob, columns=['id', 'text', 'start', 'end', 'punBeg', 'punEnd'])
    merged = pd.merge(train_data_df, ent_prob_df, on='id')
    common_index = merged.index
    train_data_df = train_data_df.drop(index=common_index)
    # Shift the offending offsets inward so spans no longer touch punctuation
    for index, row in merged.iterrows():
        if row['punBeg'] == 'YES':
            for item in row['label']:
                if int(item[0]) == int(row['start']):
                    item[0] = item[0] + 1
        if row['punEnd'] == 'YES':
            for item in row['label']:
                if int(item[1]) == int(row['end']):
                    item[1] = item[1] - 1
    for index, row in merged.iterrows():
        new_row = {'id': row['id'], 'text': row['text_x'], 'label': row['label'], 'Comments': row['Comments']}
        # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
        train_data_df = pd.concat([train_data_df, pd.DataFrame([new_row])], ignore_index=True)
    dict_list = train_data_df.to_dict(orient='records')
    list_train_data = []
    for i, ele in enumerate(dict_list):
        list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']}))
    for i, ele in enumerate(list_train_data):
        entities = []
        for label in list_train_data[i][1]['labels']:
            entities.append((label[0], label[1], label[2]))
        list_train_data[i][1]['labels'] = entities
    return list_train_data

os.chdir('/kaggle/input/d/fatimahabib1/niort-sentences')
# Despite the .jsonl extension, the file is parsed here as a single JSON document
with open('annotations-niort-sentence-level.jsonl', 'r', encoding='utf-8') as f:
    s = f.read()
data = json.loads(s)
train_data = data['annotations']
list_train_data = []
for i, ele in enumerate(train_data):
    list_train_data.append((train_data[i]['text'], {'labels': train_data[i]['label']}))
training_list = trim_entity_spans(list_train_data)
# Double-check that no span still carries leading or trailing whitespace
ent_prob = []
ents = []
for i, ele in enumerate(training_list):
    for j, elem in enumerate(training_list[i][1]['labels']):
        start = elem[0]
        end = elem[1]
        ent = training_list[i][0][start:end]
        num_spaces_beginning = len(ent) - len(ent.lstrip())
        num_spaces_end = len(ent) - len(ent.rstrip())
        ents.append((training_list[i][0], ent, start, end, num_spaces_beginning, num_spaces_end))
for ent in ents:
    if ent[1] != ent[1].strip():
        ent_prob.append(ent)
nlp = fr_core_news_sm.load()
db = DocBin()
for text, annot in tqdm(training_list):
    doc = nlp.make_doc(text)
    ents = []
    for start, end, label in annot['labels']:
        span = doc.char_span(start, end, label=label, alignment_mode='contract')
        if span is None:
            continue  # annotation does not align with token boundaries
        ents.append(span)
    # filter_spans drops overlapping spans, keeping the longest of each group
    doc.ents = filter_spans(ents)
    db.add(doc)
db.to_disk('/kaggle/working/train.spacy')
best_nlp = spacy.load('/kaggle/working/model-best')
colors = {'INFO': '#F67DE3', 'SUB': '#7DF6D9'}
options = {'colors': colors}
doc = best_nlp('pour les constructions de moins de 90 m² de surface de plancher : 30 m² de surface de plancher supplémentaire par rapport à la surface de plancher existante à la date d’approbation du PLU. ')
doc = best_nlp('La peinture sur les murs en pierre en taille ou en moellon est interdite. L’emploi à nu des matériaux destinés à être enduits est strictement interdit. ')
test_sent = 'Les règles applicables aux constructions non prévues ci-dessus sont celles auxquelles elles sont le plus directement assimilables. Lorsqu’un projet comporte plusieurs destinations, les places de stationnement se calculent au prorata de la surface de plancher de chaque destination de construction. En cas de changement de destination, il ne sera exigé que les places de stationnement correspondant au différentiel entre les deux destinations. L’ensemble des dispositions prévues dans cet article ne s’applique pas aux demandes d’extension ou de surélévation apportées aux immeubles de logements existants sans création de logement supplémentaire. Ces dispositions ne s’appliquent pas non plus dans le cas de création de surface de plancher liée à une annexe (habitation) sans création de logement supplémentaire. '
doc = best_nlp(test_sent)
spacy.displacy.render(doc, style='ent', options=options, jupyter=True)
code
122256403/cell_31
[ "text_plain_output_1.png" ]
!python -m spacy train /kaggle/working/config.cfg --output /kaggle/working/ --paths.train /kaggle/working/train.spacy --paths.dev /kaggle/working/train.spacy --gpu-id 0
code
122256403/cell_22
[ "text_plain_output_1.png" ]
from spacy.tokens import DocBin
from spacy.util import filter_spans
from tqdm import tqdm
import fr_core_news_sm
import json
import os
import pandas as pd
import re

def trim_entity_spans(data: list) -> list:
    """Removes leading and trailing white spaces from entity spans.

    Args:
        data (list): The data to be cleaned in spaCy JSON format.

    Returns:
        list: The cleaned data.
    """
    invalid_span_tokens = re.compile('\\s')
    cleaned_data = []
    for text, annotations in data:
        entities = annotations['labels']
        valid_entities = []
        for start, end, label in entities:
            valid_start = start
            valid_end = end
            while valid_start < len(text) and invalid_span_tokens.match(text[valid_start]):
                valid_start += 1
            while valid_end > 1 and invalid_span_tokens.match(text[valid_end - 1]):
                valid_end -= 1
            valid_entities.append([valid_start, valid_end, label])
        cleaned_data.append([text, {'labels': valid_entities}])
    return cleaned_data

def starts_with_punctuation(s):
    return bool(re.match('^\\W', s))

def ends_with_punctuation(s):
    return bool(re.search('\\W$', s))

def dp(train_data):
    list_train_data = []
    for i, ele in enumerate(train_data):
        list_train_data.append((train_data[i]['id'], train_data[i]['text'], {'labels': train_data[i]['label']}))
    ent_prob = []
    for i, ele in enumerate(list_train_data):
        ents = []
        for j, elem in enumerate(list_train_data[i][2]['labels']):
            start = elem[0]
            end = elem[1]
            ent = list_train_data[i][1][start:end]
            # Flag spans that start and/or end on a punctuation character
            begins = 'YES' if starts_with_punctuation(ent) else 'NO'
            ends = 'YES' if ends_with_punctuation(ent) else 'NO'
            ents.append((list_train_data[i][0], ent, start, end, begins, ends))
        for ent in ents:
            if ent[4] == 'YES' or ent[5] == 'YES':
                ent_prob.append(ent)
    train_data_df = pd.DataFrame(train_data)
    ent_prob_df = pd.DataFrame(ent_prob, columns=['id', 'text', 'start', 'end', 'punBeg', 'punEnd'])
    merged = pd.merge(train_data_df, ent_prob_df, on='id')
    common_index = merged.index
    train_data_df = train_data_df.drop(index=common_index)
    # Shift the offending offsets inward so spans no longer touch punctuation
    for index, row in merged.iterrows():
        if row['punBeg'] == 'YES':
            for item in row['label']:
                if int(item[0]) == int(row['start']):
                    item[0] = item[0] + 1
        if row['punEnd'] == 'YES':
            for item in row['label']:
                if int(item[1]) == int(row['end']):
                    item[1] = item[1] - 1
    for index, row in merged.iterrows():
        new_row = {'id': row['id'], 'text': row['text_x'], 'label': row['label'], 'Comments': row['Comments']}
        # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
        train_data_df = pd.concat([train_data_df, pd.DataFrame([new_row])], ignore_index=True)
    dict_list = train_data_df.to_dict(orient='records')
    list_train_data = []
    for i, ele in enumerate(dict_list):
        list_train_data.append((dict_list[i]['text'], {'labels': dict_list[i]['label']}))
    for i, ele in enumerate(list_train_data):
        entities = []
        for label in list_train_data[i][1]['labels']:
            entities.append((label[0], label[1], label[2]))
        list_train_data[i][1]['labels'] = entities
    return list_train_data

os.chdir('/kaggle/input/d/fatimahabib1/niort-sentences')
# Despite the .jsonl extension, the file is parsed here as a single JSON document
with open('annotations-niort-sentence-level.jsonl', 'r', encoding='utf-8') as f:
    s = f.read()
data = json.loads(s)
train_data = data['annotations']
list_train_data = []
for i, ele in enumerate(train_data):
    list_train_data.append((train_data[i]['text'], {'labels': train_data[i]['label']}))
training_list = trim_entity_spans(list_train_data)
# Double-check that no span still carries leading or trailing whitespace
ent_prob = []
ents = []
for i, ele in enumerate(training_list):
    for j, elem in enumerate(training_list[i][1]['labels']):
        start = elem[0]
        end = elem[1]
        ent = training_list[i][0][start:end]
        num_spaces_beginning = len(ent) - len(ent.lstrip())
        num_spaces_end = len(ent) - len(ent.rstrip())
        ents.append((training_list[i][0], ent, start, end, num_spaces_beginning, num_spaces_end))
for ent in ents:
    if ent[1] != ent[1].strip():
        ent_prob.append(ent)
nlp = fr_core_news_sm.load()
db = DocBin()
for text, annot in tqdm(training_list):
    doc = nlp.make_doc(text)
    ents = []
    for start, end, label in annot['labels']:
        span = doc.char_span(start, end, label=label, alignment_mode='contract')
        if span is None:
            print('Skipping entity')
        else:
            ents.append(span)
    # filter_spans drops overlapping spans, keeping the longest of each group
    doc.ents = filter_spans(ents)
    db.add(doc)
db.to_disk('/kaggle/working/train.spacy')
code
122256403/cell_27
[ "text_plain_output_1.png" ]
!python -m spacy init fill-config /kaggle/input/configs/base_config.cfg /kaggle/working/config.cfg
code
106198731/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic565.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic446.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic324.dtypes
code
106198731/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic355.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic505.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106198731/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic386.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic415.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic595.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic475.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic536.sort_values('avgMeasuredTime', ascending=True)
code
106198731/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Traffic324 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158324.csv')
Traffic355 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158355.csv')
Traffic386 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158386.csv')
Traffic415 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158415.csv')
Traffic446 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158446.csv')
Traffic475 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158475.csv')
Traffic505 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158505.csv')
Traffic536 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158536.csv')
Traffic565 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158565.csv')
Traffic595 = pd.read_csv('../input/smart-city-traffic-dataset/trafficData158595.csv')
Traffic324.dtypes
Traffic324.sort_values('avgMeasuredTime', ascending=True)
code
33105040/cell_18
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from fbprophet import Prophet
from pmdarima import auto_arima
from statsmodels.tsa.arima_model import ARIMA
import datetime
import pandas as pd
import plotly.graph_objects as go

covid_data = pd.read_excel('/kaggle/input/corona-virus-pakistan-dataset-2020/COVID_FINAL_DATA.xlsx')
covid_data.isnull().sum()
covid_data.dtypes
covid_data['Date'] = pd.to_datetime(covid_data['Date'])
pak_data = covid_data.copy()
deaths = pak_data['Expired'].values.tolist()
data = pd.DataFrame(columns=['ds', 'y'])
data['ds'] = list(pak_data['Date'].unique())
data['y'] = deaths
prop = Prophet()
prop.fit(data)
future = prop.make_future_dataframe(periods=15)
prop_forecast = prop.predict(future)
forecast = prop_forecast[['ds', 'yhat']].tail(15)
fig = go.Figure()
fig.add_trace(go.Scatter(x=pak_data['Date'], y=pak_data['Expired'], mode='lines+markers', marker_color='green', name='Actual'))
fig.add_trace(go.Scatter(x=prop_forecast['ds'], y=prop_forecast['yhat_upper'], mode='lines+markers', marker_color='red', name='Predicted'))
fig.update_layout(title_text='Death Cases (Predicted vs Actual) using Prophet')
fig.update_layout(plot_bgcolor='rgb(275, 270, 273)', width=1000, height=600)
fig.show()
recv = pak_data['Total Recovered'].values.tolist()
data = pd.DataFrame(columns=['ds', 'y'])
data['ds'] = list(pak_data['Date'])
data['y'] = recv
prop = Prophet()
prop.fit(data)
future = prop.make_future_dataframe(periods=15)
prop_forecast = prop.predict(future)
forecast = prop_forecast[['ds', 'yhat']].tail(15)
print(forecast)
#fig = plot_plotly(prop, prop_forecast)
#fig = prop.plot(prop_forecast,xlabel='Date',ylabel='Confirmed Cases')
fig = go.Figure()
fig.add_trace(go.Scatter(x=pak_data['Date'], y=pak_data['Total Recovered'], mode='lines+markers', marker_color='green', name='Actual'))
fig.add_trace(go.Scatter(x=prop_forecast['ds'], y=prop_forecast['yhat_upper'], mode='lines+markers', marker_color='yellow', name='Predicted'))
fig.update_layout(title_text='Recovered Cases (Predicted vs Actual) using Prophet')
fig.update_layout(plot_bgcolor='rgb(275, 270, 273)', width=600, height=600)
fig.show()
cc = pak_data['Total Confirmed Cases'].values
p, d, q = auto_arima(cc).order
print(p, d, q)
model = ARIMA(pak_data['Total Confirmed Cases'], order=(p, d, q))
arima = model.fit(disp=True)
forecast = arima.forecast(steps=15)
pred = list(forecast[0])
print(pred)
start_date = pak_data['Date'].max()
prediction_dates = []
for i in range(15):
    date = start_date + datetime.timedelta(days=1)
    prediction_dates.append(date)
    start_date = date
fig = go.Figure()
fig.add_trace(go.Scatter(x=pak_data['Date'], y=pak_data['Total Confirmed Cases'], mode='lines+markers', marker_color='green', name='Actual'))
fig.add_trace(go.Scatter(x=prediction_dates, y=pred, mode='lines+markers', marker_color='Orange', name='Predicted'))
fig.update_layout(title_text='Confirmed cases Predicted vs Actual using ARIMA')
fig.update_layout(plot_bgcolor='rgb(275, 270, 273)', width=600, height=600)
fig.show()
code
33105040/cell_15
[ "text_html_output_1.png" ]
from fbprophet import Prophet
import pandas as pd
import plotly.graph_objects as go

covid_data = pd.read_excel('/kaggle/input/corona-virus-pakistan-dataset-2020/COVID_FINAL_DATA.xlsx')
covid_data.isnull().sum()
covid_data.dtypes
covid_data['Date'] = pd.to_datetime(covid_data['Date'])
pak_data = covid_data.copy()
deaths = pak_data['Expired'].values.tolist()
data = pd.DataFrame(columns=['ds', 'y'])
data['ds'] = list(pak_data['Date'].unique())
data['y'] = deaths
prop = Prophet()
prop.fit(data)
future = prop.make_future_dataframe(periods=15)
prop_forecast = prop.predict(future)
forecast = prop_forecast[['ds', 'yhat']].tail(15)
fig = go.Figure()
fig.add_trace(go.Scatter(x=pak_data['Date'], y=pak_data['Expired'], mode='lines+markers', marker_color='green', name='Actual'))
fig.add_trace(go.Scatter(x=prop_forecast['ds'], y=prop_forecast['yhat_upper'], mode='lines+markers', marker_color='red', name='Predicted'))
fig.update_layout(title_text='Death Cases (Predicted vs Actual) using Prophet')
fig.update_layout(plot_bgcolor='rgb(275, 270, 273)', width=1000, height=600)
fig.show()
data
code
33105040/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from fbprophet import Prophet
import pandas as pd
import plotly.graph_objects as go

covid_data = pd.read_excel('/kaggle/input/corona-virus-pakistan-dataset-2020/COVID_FINAL_DATA.xlsx')
covid_data.isnull().sum()
covid_data.dtypes
covid_data['Date'] = pd.to_datetime(covid_data['Date'])
pak_data = covid_data.copy()
deaths = pak_data['Expired'].values.tolist()
data = pd.DataFrame(columns=['ds', 'y'])
data['ds'] = list(pak_data['Date'].unique())
data['y'] = deaths
prop = Prophet()
prop.fit(data)
future = prop.make_future_dataframe(periods=15)
prop_forecast = prop.predict(future)
forecast = prop_forecast[['ds', 'yhat']].tail(15)
fig = go.Figure()
fig.add_trace(go.Scatter(x=pak_data['Date'], y=pak_data['Expired'], mode='lines+markers', marker_color='green', name='Actual'))
fig.add_trace(go.Scatter(x=prop_forecast['ds'], y=prop_forecast['yhat_upper'], mode='lines+markers', marker_color='red', name='Predicted'))
fig.update_layout(title_text='Death Cases (Predicted vs Actual) using Prophet')
fig.update_layout(plot_bgcolor='rgb(275, 270, 273)', width=1000, height=600)
fig.show()
code
33105040/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

covid_data = pd.read_excel('/kaggle/input/corona-virus-pakistan-dataset-2020/COVID_FINAL_DATA.xlsx')
covid_data.isnull().sum()
covid_data.dtypes
pak_data = covid_data.copy()
pak_data.head()
code
50242450/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

sns.despine(left=True, right=True, bottom=True, top=True)
sns.set_style('white')
df = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', engine='python', error_bad_lines=False)
colunas = ['Q1', 'Q2', 'Q4', 'Q5', 'Q6', 'Q8', 'Q11', 'Q13', 'Q15', 'Q20', 'Q21', 'Q24', 'Q25', 'Q30', 'Q32', 'Q38']
for i in colunas:
    fig, ax = plt.subplots(1, 1, figsize=(15, 6))
    sns.countplot(y=df[i][1:], data=df.iloc[1:], order=df[i][1:].value_counts().index, palette='Blues_r')
    fig.text(0.1, 0.95, f'{df[i][0].split("(")[0]}', fontsize=16, fontweight='bold', fontfamily='serif')
    plt.xlabel(' ', fontsize=20)
    plt.ylabel('')
    plt.yticks(fontsize=13)
    plt.box(False)
colunas = ['Q7', 'Q10', 'Q12', 'Q14', 'Q16', 'Q17', 'Q18', 'Q19', 'Q23', 'Q26', 'Q27', 'Q28', 'Q29', 'Q31', 'Q33', 'Q34', 'Q35', 'Q36', 'Q37']
for j in colunas:
    df_q = df[[i for i in df.columns if j in i]]
    df_q_count = pd.Series(dtype='int')
    for i in df_q:
        df_q_count[df_q[i].value_counts().index[0]] = df_q[i].count()
    ax, fig = plt.subplots(1, 1, figsize=(15, 6))
    sns.barplot(y=df_q_count.sort_values()[::-1].index, x=df_q_count.sort_values()[::-1], palette='Blues_r')
    fig.text(0, -1, f"\n\n{df[i][0].split('(')[0]}\n", fontsize=20, fontweight='bold', fontfamily='monospace')
    plt.box(False)
    plt.xlabel('')
    plt.ylabel('')
    plt.yticks(fontsize=20)
code
50242450/cell_9
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
import pandas as pd

df = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', engine='python', error_bad_lines=False)
df.head()
code
50242450/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

sns.despine(left=True, right=True, bottom=True, top=True)
sns.set_style('white')
df = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', engine='python', error_bad_lines=False)
colunas = ['Q1', 'Q2', 'Q4', 'Q5', 'Q6', 'Q8', 'Q11', 'Q13', 'Q15', 'Q20', 'Q21', 'Q24', 'Q25', 'Q30', 'Q32', 'Q38']
for i in colunas:
    fig, ax = plt.subplots(1, 1, figsize=(15, 6))
    sns.countplot(y=df[i][1:], data=df.iloc[1:], order=df[i][1:].value_counts().index, palette='Blues_r')
    fig.text(0.1, 0.95, f"{df[i][0].split('(')[0]}", fontsize=16, fontweight='bold', fontfamily='serif')
    plt.xlabel(' ', fontsize=20)
    plt.ylabel('')
    plt.yticks(fontsize=13)
    plt.box(False)
code
50242450/cell_7
[ "image_output_11.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

sns.despine(left=True, right=True, bottom=True, top=True)
sns.set_style('white')
code
106198134/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
glimpse(dailyActivity)
code
106198134/cell_9
[ "text_html_output_1.png" ]
colnames(dailyActivity)
code
106198134/cell_11
[ "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_1.png", "text_html_output_3.png" ]
head(dailyActivity)
code
106198134/cell_15
[ "text_html_output_1.png" ]
skim_without_charts(dailyActivity)
code
106198134/cell_3
[ "text_plain_output_1.png" ]
# install.packages() installs libraries; installed.packages(), as originally
# written, only lists packages that are already installed.
install.packages('tidyverse')
install.packages('readr')
install.packages('here')
install.packages('skimr')
install.packages('dplyr')
install.packages('janitor')
code