Dataset schema (4 columns):
  path              string    length 13–17
  screenshot_names  sequence  length 1–873
  code              string    length 0–40.4k
  cell_type         string    1 class ("code")
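The records below are Kaggle notebook code cells keyed by "kernel_id/cell_name". A minimal sketch of loading and iterating such a dump, assuming it is distributed as a parquet file (the file name "cells.parquet" is hypothetical):

# Hypothetical loader for a dump with the four columns above.
import pandas as pd

cells = pd.read_parquet('cells.parquet')
for _, row in cells.iterrows():
    kernel_id, cell_name = row['path'].split('/')  # e.g. "18140562/cell_2"
    assert row['cell_type'] == 'code'              # the single class in this dump
    print(kernel_id, cell_name, len(row['code']), len(row['screenshot_names']))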
18140562/cell_2
[ "text_plain_output_1.png" ]
import os

print(os.listdir('../input'))
code
18140562/cell_19
[ "text_html_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()

X = df_train['STORY']
y = df_train['SECTION']

# TF-IDF on unigrams and bigrams, with English stop words removed
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), stop_words=stop_words)
tfidf_vec.fit(X)
x_vec = tfidf_vec.transform(X)

svc = LinearSVC(C=10.0)
print(cross_val_score(svc, x_vec, y, cv=10, verbose=False).mean())
code
18140562/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
code
18140562/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()

# Class distribution of the SECTION label
plt.figure(figsize=(8, 5))
labels = list(set(df_train['SECTION']))
counts = []
for label in labels:
    counts.append(np.count_nonzero(df_train['SECTION'] == label))
plt.pie(counts, labels=labels, autopct='%1.1f%%')
plt.show()
code
18140562/cell_16
[ "text_plain_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()

labels = list(set(df_train['SECTION']))
counts = []
for label in labels:
    counts.append(np.count_nonzero(df_train['SECTION'] == label))

X = df_train['STORY']
y = df_train['SECTION']

tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), stop_words=stop_words)
tfidf_vec.fit(X)
x_vec = tfidf_vec.transform(X)

# Oversample the minority classes with SMOTE
# (the original called fit_sample, which imbalanced-learn renamed to fit_resample)
sm = SMOTE(random_state=42)
x_vec, y = sm.fit_resample(x_vec, y)

# Class distribution after resampling
labels = list(set(y))
counts = []
for label in labels:
    counts.append(np.count_nonzero(y == label))
plt.pie(counts, labels=labels, autopct='%1.1f%%')
plt.show()
code
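The cell above resamples the full TF-IDF matrix before any train/test split, so synthetic copies of training stories can leak into later evaluation. A minimal sketch of the usual precaution, oversampling only the training fold (variable names mirror the notebook; the split itself is an addition):

from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split

# Split first, then oversample only the training portion
x_tr, x_te, y_tr, y_te = train_test_split(x_vec, y, test_size=0.2, random_state=42)
x_tr, y_tr = SMOTE(random_state=42).fit_resample(x_tr, y_tr)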
18140562/cell_22
[ "image_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from imblearn.over_sampling import SMOTE
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

df_train = pd.read_excel('../input/Data_Train.xlsx')
df_test = pd.read_excel('../input/Data_Test.xlsx')
df_train.sample(5)
df_train.isna().sum()

X = df_train['STORY']
y = df_train['SECTION']

tfidf_vec = TfidfVectorizer(ngram_range=(1, 2), stop_words=stop_words)
tfidf_vec.fit(X)
x_vec = tfidf_vec.transform(X)

# The original cell used x_train_vec / x_eval_vec without defining them;
# a train/eval split of the vectorized data is the most plausible missing step.
x_train_vec, x_eval_vec, y_train, y_eval = train_test_split(x_vec, y, test_size=0.2, random_state=42)

svc = LinearSVC(C=10.0)
svc.fit(x_train_vec, y_train)
y_preds = svc.predict(x_eval_vec)
confusion_matrix(y_eval, y_preds)
code
89142870/cell_6
[ "text_plain_output_1.png" ]
import gc

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, TensorDataset, Subset

train = pd.read_parquet('../input/train-small/train_small.parquet')
float_feature_names = train.drop(['target', 'row_id', 'time_id', 'investment_id'], axis=1).columns
float_input = train[float_feature_names].values
investment_id = train[['investment_id']].values.astype(int)
time_id = train[['time_id']].values.astype(int)
targets = train[['target']].values
del train

float_input = torch.FloatTensor(float_input)
investment_id = torch.LongTensor(investment_id)
time_id = torch.LongTensor(time_id)
target = torch.FloatTensor(targets)
dataset = TensorDataset(time_id, investment_id, float_input, target)
gc.collect()


class UbiquantRegressor(pl.LightningModule):

    def __init__(self):
        super(UbiquantRegressor, self).__init__()
        # Embedding of investment_id to 11 float features.
        # As the number of unseen investment_ids is unknown, a large margin is selected (10000).
        self.id_embedding = nn.Embedding(10000, 11)
        # Credits to sahil112: https://www.kaggle.com/sahil112/whyonlykeras-easy-pytorch-competitive-dnn for this architecture.
        # Note: the original called nn.SiLU(0.1) in two places; SiLU takes no slope
        # argument (only an inplace flag), so the stray 0.1 is dropped here.
        # 311 inputs = the float features plus the 11 embedding dims.
        self.layers = nn.Sequential(
            nn.Linear(311, 64), nn.BatchNorm1d(64), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(64, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 512), nn.BatchNorm1d(512), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(512, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 8), nn.BatchNorm1d(8), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(8, 1))

    def forward(self, time_id, investment_id, f_features):
        # Embedding of the investment_id
        invest_embedding = self.id_embedding(investment_id).squeeze(dim=1)
        # Concat embedding and features.
        # Open question: should the network have access to the time_id?
        # The final test set will consist of time_ids never seen in the train set.
        # Nevertheless, it can easily be added here:
        # dnn_input = torch.cat((invest_embedding, time_id, f_features), axis=-1)
        dnn_input = torch.cat((invest_embedding, f_features), axis=-1)
        return self.layers(dnn_input)

    def training_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        out = self(time_id, investment_id, float_input)
        loss = F.mse_loss(out, target)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        result = self(time_id, investment_id, float_input)
        loss = F.mse_loss(result, target)
        return {'val_loss': loss, 'result': result, 'target': target,
                'time_id': time_id, 'investment_id': investment_id}

    def validation_epoch_end(self, outputs):
        val_losses = [x['val_loss'] for x in outputs]
        result = torch.cat([x['result'] for x in outputs])
        target = torch.cat([x['target'] for x in outputs])
        time_ids = torch.cat([x['time_id'] for x in outputs])
        investment_ids = torch.cat([x['investment_id'] for x in outputs])
        # Pearson correlation between prediction and target per time_id,
        # averaged over time_ids (the competition metric)
        corrs = []
        for t in torch.unique(time_ids):
            t_results = result[time_ids == t]
            t_target = target[time_ids == t]
            # corr = torch.corrcoef(torch.stack((t_results, t_target)))[0, 1]  # use this when pytorch>=1.10
            corr = np.corrcoef(torch.stack((t_results, t_target)).cpu().numpy())[0, 1]
            corrs.append(corr)
        # mean_corr = torch.mean(torch.stack(corrs))  # use this when pytorch>=1.10
        mean_corr = np.nanmean(corrs)
        epoch_loss = torch.stack(val_losses).mean()  # combine losses
        self.log('val_loss', epoch_loss, prog_bar=True)
        self.log('mean_corr', mean_corr, prog_bar=True)
        return {'val_loss': epoch_loss, 'corrs': mean_corr}

    def epoch_end(self, epoch, result):
        pass

    def test_step(self, batch, batch_nb):
        pass

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)


class MetricTracker(Callback):

    def __init__(self):
        self.val_losses = []
        self.corrs = []

    def on_validation_epoch_end(self, trainer, module):
        # trainer._results is a private Lightning structure; kept from the original
        self.val_losses.append(trainer._results['validation_epoch_end.val_loss'].value.cpu().numpy())
        self.corrs.append(trainer._results['validation_epoch_end.mean_corr'].value.cpu().numpy())
        if 0:  # index == 1:  live plotting of results during training, switched off
            ax.plot(self.val_losses, color='orange')
            ax.set_ylabel('Val loss', color='orange', fontsize=14)
            ax2 = ax.twinx()
            ax2.plot(self.corrs, color='blue')
            ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
            plt.show()


n_splits = 10
kf = KFold(n_splits=n_splits, shuffle=True)
val_losses = []
mean_corrs = []
models = []  # a list of all final models
index = 0
for train_index, test_index in kf.split(dataset):
    index += 1
    print('CV run {}...'.format(index))
    train_ds, val_ds = Subset(dataset, train_index), Subset(dataset, test_index)
    train_loader = DataLoader(train_ds, 32768)
    val_loader = DataLoader(val_ds, 32768)
    uq_regressor = UbiquantRegressor()
    metricTracker = MetricTracker()
    trainer = pl.Trainer(gpus=1,
                         callbacks=[metricTracker,
                                    EarlyStopping(monitor='mean_corr', mode='max', patience=3),
                                    ModelCheckpoint(save_top_k=1, monitor='mean_corr', mode='max',
                                                    save_on_train_epoch_end=False)],
                         max_epochs=21, num_sanity_val_steps=0)
    trainer.fit(uq_regressor, train_loader, val_loader)
    # Load best model based on mean daily correlation with target
    uq_regressor = UbiquantRegressor().load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    uq_regressor.eval()
    models.append(uq_regressor)
    # Show val results
    val_result = trainer.validate(model=uq_regressor, dataloaders=val_loader)
    val_losses.append(val_result[0]['val_loss'])
    mean_corrs.append(val_result[0]['mean_corr'])
    fig, ax = plt.subplots()
    ax.plot(metricTracker.val_losses, color='orange')
    ax.set_ylabel('Val loss', color='orange', fontsize=14)
    ax2 = ax.twinx()
    ax2.plot(metricTracker.corrs, color='blue')
    ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
    plt.show()

val_result = trainer.validate(model=uq_regressor, dataloaders=val_loader)
code
89142870/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_1.png", "text_html_output_3.png" ]
import gc

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import ubiquant
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, TensorDataset, Subset

train = pd.read_parquet('../input/train-small/train_small.parquet')
float_feature_names = train.drop(['target', 'row_id', 'time_id', 'investment_id'], axis=1).columns
float_input = train[float_feature_names].values
investment_id = train[['investment_id']].values.astype(int)
time_id = train[['time_id']].values.astype(int)
targets = train[['target']].values
del train

float_input = torch.FloatTensor(float_input)
investment_id = torch.LongTensor(investment_id)
time_id = torch.LongTensor(time_id)
target = torch.FloatTensor(targets)
dataset = TensorDataset(time_id, investment_id, float_input, target)
gc.collect()


class UbiquantRegressor(pl.LightningModule):

    def __init__(self):
        super(UbiquantRegressor, self).__init__()
        # Embedding of investment_id to 11 float features.
        # As the number of unseen investment_ids is unknown, a large margin is selected (10000).
        self.id_embedding = nn.Embedding(10000, 11)
        # Credits to sahil112: https://www.kaggle.com/sahil112/whyonlykeras-easy-pytorch-competitive-dnn for this architecture.
        # The stray nn.SiLU(0.1) arguments from the original are dropped (SiLU takes no slope).
        self.layers = nn.Sequential(
            nn.Linear(311, 64), nn.BatchNorm1d(64), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(64, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 512), nn.BatchNorm1d(512), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(512, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 8), nn.BatchNorm1d(8), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(8, 1))

    def forward(self, time_id, investment_id, f_features):
        # Embedding of the investment_id
        invest_embedding = self.id_embedding(investment_id).squeeze(dim=1)
        # Concat embedding and features.
        # Open question: should the network have access to the time_id?
        # The final test set will consist of time_ids never seen in the train set.
        # Nevertheless, it can easily be added here:
        # dnn_input = torch.cat((invest_embedding, time_id, f_features), axis=-1)
        dnn_input = torch.cat((invest_embedding, f_features), axis=-1)
        return self.layers(dnn_input)

    def training_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        out = self(time_id, investment_id, float_input)
        loss = F.mse_loss(out, target)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        result = self(time_id, investment_id, float_input)
        loss = F.mse_loss(result, target)
        return {'val_loss': loss, 'result': result, 'target': target,
                'time_id': time_id, 'investment_id': investment_id}

    def validation_epoch_end(self, outputs):
        val_losses = [x['val_loss'] for x in outputs]
        result = torch.cat([x['result'] for x in outputs])
        target = torch.cat([x['target'] for x in outputs])
        time_ids = torch.cat([x['time_id'] for x in outputs])
        investment_ids = torch.cat([x['investment_id'] for x in outputs])
        corrs = []
        for t in torch.unique(time_ids):
            t_results = result[time_ids == t]
            t_target = target[time_ids == t]
            # corr = torch.corrcoef(torch.stack((t_results, t_target)))[0, 1]  # use this when pytorch>=1.10
            corr = np.corrcoef(torch.stack((t_results, t_target)).cpu().numpy())[0, 1]
            corrs.append(corr)
        # mean_corr = torch.mean(torch.stack(corrs))  # use this when pytorch>=1.10
        mean_corr = np.nanmean(corrs)
        epoch_loss = torch.stack(val_losses).mean()  # combine losses
        self.log('val_loss', epoch_loss, prog_bar=True)
        self.log('mean_corr', mean_corr, prog_bar=True)
        return {'val_loss': epoch_loss, 'corrs': mean_corr}

    def epoch_end(self, epoch, result):
        pass

    def test_step(self, batch, batch_nb):
        pass

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)


class MetricTracker(Callback):

    def __init__(self):
        self.val_losses = []
        self.corrs = []

    def on_validation_epoch_end(self, trainer, module):
        # trainer._results is a private Lightning structure; kept from the original
        self.val_losses.append(trainer._results['validation_epoch_end.val_loss'].value.cpu().numpy())
        self.corrs.append(trainer._results['validation_epoch_end.mean_corr'].value.cpu().numpy())
        if 0:  # index == 1:  live plotting of results during training, switched off
            ax.plot(self.val_losses, color='orange')
            ax.set_ylabel('Val loss', color='orange', fontsize=14)
            ax2 = ax.twinx()
            ax2.plot(self.corrs, color='blue')
            ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
            plt.show()


n_splits = 10
kf = KFold(n_splits=n_splits, shuffle=True)
val_losses = []
mean_corrs = []
models = []  # a list of all final models
index = 0
for train_index, test_index in kf.split(dataset):
    index += 1
    print('CV run {}...'.format(index))
    train_ds, val_ds = Subset(dataset, train_index), Subset(dataset, test_index)
    train_loader = DataLoader(train_ds, 32768)
    val_loader = DataLoader(val_ds, 32768)
    uq_regressor = UbiquantRegressor()
    metricTracker = MetricTracker()
    trainer = pl.Trainer(gpus=1,
                         callbacks=[metricTracker,
                                    EarlyStopping(monitor='mean_corr', mode='max', patience=3),
                                    ModelCheckpoint(save_top_k=1, monitor='mean_corr', mode='max',
                                                    save_on_train_epoch_end=False)],
                         max_epochs=21, num_sanity_val_steps=0)
    trainer.fit(uq_regressor, train_loader, val_loader)
    # Load best model based on mean daily correlation with target
    uq_regressor = UbiquantRegressor().load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    uq_regressor.eval()
    models.append(uq_regressor)
    # Show val results
    val_result = trainer.validate(model=uq_regressor, dataloaders=val_loader)
    val_losses.append(val_result[0]['val_loss'])
    mean_corrs.append(val_result[0]['mean_corr'])
    fig, ax = plt.subplots()
    ax.plot(metricTracker.val_losses, color='orange')
    ax.set_ylabel('Val loss', color='orange', fontsize=14)
    ax2 = ax.twinx()
    ax2.plot(metricTracker.corrs, color='blue')
    ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
    plt.show()

# Inference: average the predictions of all CV fold models over the API test batches
env = ubiquant.make_env()
iter_test = env.iter_test()
for test_df, sample_prediction_df in iter_test:
    time_id = test_df.row_id.str.split('_', expand=True)[0].values.astype(int)
    investment_id = test_df[['investment_id']].values.astype(int)
    float_input = test_df[float_feature_names].values
    float_input = torch.FloatTensor(float_input)
    investment_id = torch.LongTensor(investment_id)
    time_id = torch.LongTensor(time_id).unsqueeze(-1)
    sample_prediction_df['target'] = 0
    for uq_regressor in models:
        predictions = uq_regressor(time_id, investment_id, float_input).squeeze()
        sample_prediction_df['target'] += predictions.detach().cpu().numpy() / n_splits
    env.predict(sample_prediction_df)
display(sample_prediction_df)
code
89142870/cell_3
[ "text_plain_output_1.png" ]
import gc

import pandas as pd
import torch
from torch.utils.data import DataLoader, TensorDataset, Subset

train = pd.read_parquet('../input/train-small/train_small.parquet')
float_feature_names = train.drop(['target', 'row_id', 'time_id', 'investment_id'], axis=1).columns
float_input = train[float_feature_names].values
investment_id = train[['investment_id']].values.astype(int)
time_id = train[['time_id']].values.astype(int)
targets = train[['target']].values
del train

float_input = torch.FloatTensor(float_input)
investment_id = torch.LongTensor(investment_id)
time_id = torch.LongTensor(time_id)
target = torch.FloatTensor(targets)
dataset = TensorDataset(time_id, investment_id, float_input, target)
gc.collect()
code
89142870/cell_5
[ "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_5.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_18.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_16.png", "text_plain_output_8.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_9.png" ]
import gc

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, TensorDataset, Subset

train = pd.read_parquet('../input/train-small/train_small.parquet')
float_feature_names = train.drop(['target', 'row_id', 'time_id', 'investment_id'], axis=1).columns
float_input = train[float_feature_names].values
investment_id = train[['investment_id']].values.astype(int)
time_id = train[['time_id']].values.astype(int)
targets = train[['target']].values
del train

float_input = torch.FloatTensor(float_input)
investment_id = torch.LongTensor(investment_id)
time_id = torch.LongTensor(time_id)
target = torch.FloatTensor(targets)
dataset = TensorDataset(time_id, investment_id, float_input, target)
gc.collect()


class UbiquantRegressor(pl.LightningModule):

    def __init__(self):
        super(UbiquantRegressor, self).__init__()
        # Embedding of investment_id to 11 float features.
        # As the number of unseen investment_ids is unknown, a large margin is selected (10000).
        self.id_embedding = nn.Embedding(10000, 11)
        # Credits to sahil112: https://www.kaggle.com/sahil112/whyonlykeras-easy-pytorch-competitive-dnn for this architecture.
        # The stray nn.SiLU(0.1) arguments from the original are dropped (SiLU takes no slope).
        self.layers = nn.Sequential(
            nn.Linear(311, 64), nn.BatchNorm1d(64), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(64, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 512), nn.BatchNorm1d(512), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(512, 256), nn.BatchNorm1d(256), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(256, 128), nn.BatchNorm1d(128), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(128, 8), nn.BatchNorm1d(8), nn.SiLU(), nn.Dropout(0.4),
            nn.Linear(8, 1))

    def forward(self, time_id, investment_id, f_features):
        # Embedding of the investment_id
        invest_embedding = self.id_embedding(investment_id).squeeze(dim=1)
        # Concat embedding and features.
        # Open question: should the network have access to the time_id?
        # The final test set will consist of time_ids never seen in the train set.
        # Nevertheless, it can easily be added here:
        # dnn_input = torch.cat((invest_embedding, time_id, f_features), axis=-1)
        dnn_input = torch.cat((invest_embedding, f_features), axis=-1)
        return self.layers(dnn_input)

    def training_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        out = self(time_id, investment_id, float_input)
        loss = F.mse_loss(out, target)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_nb):
        time_id, investment_id, float_input, target = batch
        result = self(time_id, investment_id, float_input)
        loss = F.mse_loss(result, target)
        return {'val_loss': loss, 'result': result, 'target': target,
                'time_id': time_id, 'investment_id': investment_id}

    def validation_epoch_end(self, outputs):
        val_losses = [x['val_loss'] for x in outputs]
        result = torch.cat([x['result'] for x in outputs])
        target = torch.cat([x['target'] for x in outputs])
        time_ids = torch.cat([x['time_id'] for x in outputs])
        investment_ids = torch.cat([x['investment_id'] for x in outputs])
        corrs = []
        for t in torch.unique(time_ids):
            t_results = result[time_ids == t]
            t_target = target[time_ids == t]
            # corr = torch.corrcoef(torch.stack((t_results, t_target)))[0, 1]  # use this when pytorch>=1.10
            corr = np.corrcoef(torch.stack((t_results, t_target)).cpu().numpy())[0, 1]
            corrs.append(corr)
        # mean_corr = torch.mean(torch.stack(corrs))  # use this when pytorch>=1.10
        mean_corr = np.nanmean(corrs)
        epoch_loss = torch.stack(val_losses).mean()  # combine losses
        self.log('val_loss', epoch_loss, prog_bar=True)
        self.log('mean_corr', mean_corr, prog_bar=True)
        return {'val_loss': epoch_loss, 'corrs': mean_corr}

    def epoch_end(self, epoch, result):
        pass

    def test_step(self, batch, batch_nb):
        pass

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)


class MetricTracker(Callback):

    def __init__(self):
        self.val_losses = []
        self.corrs = []

    def on_validation_epoch_end(self, trainer, module):
        # trainer._results is a private Lightning structure; kept from the original
        self.val_losses.append(trainer._results['validation_epoch_end.val_loss'].value.cpu().numpy())
        self.corrs.append(trainer._results['validation_epoch_end.mean_corr'].value.cpu().numpy())
        if 0:  # index == 1:  live plotting of results during training, switched off
            ax.plot(self.val_losses, color='orange')
            ax.set_ylabel('Val loss', color='orange', fontsize=14)
            ax2 = ax.twinx()
            ax2.plot(self.corrs, color='blue')
            ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
            plt.show()


n_splits = 10
kf = KFold(n_splits=n_splits, shuffle=True)
val_losses = []
mean_corrs = []
models = []
index = 0
for train_index, test_index in kf.split(dataset):
    index += 1
    print('CV run {}...'.format(index))
    train_ds, val_ds = Subset(dataset, train_index), Subset(dataset, test_index)
    train_loader = DataLoader(train_ds, 32768)
    val_loader = DataLoader(val_ds, 32768)
    uq_regressor = UbiquantRegressor()
    metricTracker = MetricTracker()
    trainer = pl.Trainer(gpus=1,
                         callbacks=[metricTracker,
                                    EarlyStopping(monitor='mean_corr', mode='max', patience=3),
                                    ModelCheckpoint(save_top_k=1, monitor='mean_corr', mode='max',
                                                    save_on_train_epoch_end=False)],
                         max_epochs=21, num_sanity_val_steps=0)
    trainer.fit(uq_regressor, train_loader, val_loader)
    uq_regressor = UbiquantRegressor().load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    uq_regressor.eval()
    models.append(uq_regressor)
    val_result = trainer.validate(model=uq_regressor, dataloaders=val_loader)
    val_losses.append(val_result[0]['val_loss'])
    mean_corrs.append(val_result[0]['mean_corr'])
    fig, ax = plt.subplots()
    ax.plot(metricTracker.val_losses, color='orange')
    ax.set_ylabel('Val loss', color='orange', fontsize=14)
    ax2 = ax.twinx()
    ax2.plot(metricTracker.corrs, color='blue')
    ax2.set_ylabel('Mean daily corr 2 target', color='blue', fontsize=14)
    plt.show()
code
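The commented-out torch.corrcoef lines in the cells above target PyTorch >= 1.10. A minimal sketch of that all-torch variant of the per-time_id validation metric (tensor names follow the notebook; 1-D tensors are assumed, which is what the boolean masking there produces):

import torch

def mean_daily_corr(result, target, time_ids):
    # Pearson correlation per time_id, averaged over time_ids, ignoring NaNs
    corrs = []
    for t in torch.unique(time_ids):
        mask = time_ids == t
        corrs.append(torch.corrcoef(torch.stack((result[mask], target[mask])))[0, 1])
    corrs = torch.stack(corrs)
    return corrs[~torch.isnan(corrs)].mean()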
90106156/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data[data['from_lat'] == data['from_lat'].median()].shape
code
90106156/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.info()
code
90106156/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data[data['to_lat'].isnull()]['from_area_id'].value_counts()
code
90106156/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data[data['from_lat'] == data['from_lat'].median()]['from_area_id']
code
90106156/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.head()
code
90106156/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data[data['to_lat'].isnull()].head()
code
90106156/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
code
90106156/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
print(data[data['from_lat'] == data['from_lat'].median()]['from_area_id'].max())
print(data['from_lat'].median())
print(data['from_long'].median())
code
90106156/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90106156/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
code
90106156/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data.info()
code
90106156/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data.info()
code
90106156/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data['vehicle_model_id'].value_counts(normalize=True) * 100
code
90106156/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data.head()
code
90106156/cell_46
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from geopy import distance

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)

# One-hot encode travel_type_id and give the dummy columns readable names
traveltype = pd.get_dummies(data['travel_type_id'], drop_first=True)
data = pd.concat([data, traveltype], axis=1)
data = data.drop(['travel_type_id'], axis=1)
data.rename(columns={2: 'traveltype_pointtopoint', 3: 'traveltype_hourly'}, inplace=True)


def cal_distance(from_lat, from_long, to_lat, to_long):
    # Geodesic distance in km between pickup and drop-off coordinates
    return distance.distance((from_lat, from_long), (to_lat, to_long)).km


data['distance'] = data.apply(lambda row: cal_distance(row['from_lat'], row['from_long'], row['to_lat'], row['to_long']), axis=1)
data = data.drop(['from_lat', 'from_long', 'to_lat', 'to_long'], axis=1)
data[data['time_diff'] < 0].head()
code
90106156/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
data = data.drop(['vehicle_model_id'], axis=1)
data.info()
code
90106156/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/yourcabs/YourCabs_training.csv')
df.shape
data = df.drop(['Car_Cancellation', 'Cost_of_error'], axis=1)
target = df[['Car_Cancellation']]
data.isnull().mean()
data = data.drop(['id', 'user_id', 'package_id', 'to_area_id', 'from_city_id', 'to_city_id', 'to_date'], axis=1)
data.shape
code
32069353/cell_2
[ "text_plain_output_1.png" ]
import math
import os
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm

plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
PUBLIC_PRIVATE = 1

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32069353/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import math
import os
import random
import time
import warnings
from datetime import datetime, timedelta

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit  # scipy.optimize.minpack is a private module; the public path is used here
from scipy.special import expit
from sklearn.metrics import r2_score
from tqdm import tqdm

plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
PUBLIC_PRIVATE = 1

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
sub_example = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
regr_ensemble_sub = pd.read_csv('/kaggle/input/regressors-ensemble-submission-week-4/submission (4).csv')

# Tag rows by DataType (0 = train, 1 = train/test overlap, 2 = test) and merge
df_train['ForecastId'] = -1
df_train['DataType'] = 0
df_test['DataType'] = 2
df_test['ConfirmedCases'] = -1
df_test['Fatalities'] = -1
df_test['Id'] = df_test['ForecastId'] + df_train['Id'].max()
df_intersection = df_train[df_train['Date'] >= df_test['Date'].min()]
df_intersection['DataType'] = 1
df_intersection['ForecastId'] = df_test[df_test['Date'] <= df_train['Date'].max()]['ForecastId'].values
df_train = df_train[df_train['Date'] < df_test['Date'].min()]
df_test = df_test[df_test['Date'] > df_intersection['Date'].max()]
if not PUBLIC_PRIVATE:
    df_intersection['ConfirmedCases'] = -1
    df_intersection['Fatalities'] = -1
    df_full = pd.concat([df_train, df_intersection, df_test], sort=False, axis=0)
else:
    df_full = pd.concat([df_train, df_intersection, df_test], sort=False, axis=0)

df_full['Province_State'] = df_full['Province_State'].fillna('')
df_full['Location'] = ['_'.join(x) for x in zip(df_full['Country_Region'], df_full['Province_State'])]
df_full.drop(columns=['Province_State', 'Country_Region'], inplace=True)
df_full.shape

# Convert dates to "days since the first date in the data"
df_full['Date'] = pd.to_datetime(df_full['Date'])
df_full['Date'] = df_full['Date'].apply(lambda s: time.mktime(s.timetuple()))
min_timestamp = np.min(df_full['Date'])
df_full['Date'] = df_full['Date'].apply(lambda s: (s - min_timestamp) / 86400.0)


def Gompertz(a, c, t, t0):
    # Gompertz growth curve: Q(t) = a * exp(-exp(-c * (t - t0)))
    Q = a * np.exp(-np.exp(-c * (t - t0)))
    return Q


# China_Anhui (a completed outbreak) serves as a reference curve to scale
# the upper bound of the fitted asymptote for other locations
ref = df_full[df_full['DataType'].isin([0, 1]) & (df_full['Location'] == 'China_Anhui')]
ref['ConfirmedCases'] /= ref['ConfirmedCases'].max()
ref['Fatalities'] /= ref['Fatalities'].max()


def getMultiplier(date):
    try:
        return 1.0 / ref[ref['Date'] == date]['ConfirmedCases'].values[0]
    except:
        return 3.5


locations = list(set(df_full['Location']))
location_sample = ['Brazil_']
train = df_full[df_full['DataType'] == 0]
valid = df_full[df_full['DataType'] == 1]
test = df_full[df_full['DataType'] == 2]

for location in tqdm(locations):
    _train = train[train['Location'] == location]
    _valid = valid[valid['Location'] == location]
    _test = test[test['Location'] == location]
    n_train_days = _train.Date.nunique()
    n_valid_days = _valid.Date.nunique()
    n_test_days = _test.Date.nunique()
    x_train = range(n_train_days)
    x_test = range(n_train_days + n_valid_days + n_test_days + 200)
    y_train_f = _train['Fatalities']
    y_train_c = _train['ConfirmedCases']

    # Fit confirmed cases
    the_first_one = _train[_train['ConfirmedCases'] > 0.05 * _train['ConfirmedCases'].max()]['Date'].min()
    first_cases = _train[_train['ConfirmedCases'] > 0.4 * _train['ConfirmedCases'].max()]['Date'].min()
    if math.isnan(the_first_one):
        the_first_one = _train['Date'].max()
        first_cases = _train['Date'].max() + 8
    current = _train['Date'].max() - first_cases
    if location.startswith('China'):
        lower_c = [0, 0.02, 0]
        upper_c = [2 * y_train_c.max() + 1, 0.15, 25]
    else:
        lower_c = [0, 0.02, the_first_one]
        upper_c = [getMultiplier(current) * np.max(y_train_c) + 1, 0.15, first_cases + 28]
    popt_c, pcov_c = curve_fit(Gompertz, x_train, y_train_c, method='trf', bounds=(lower_c, upper_c))
    a_max_c, estimated_c_c, estimated_t0_c = popt_c
    y_predict_c = Gompertz(a_max_c, estimated_c_c, x_test, estimated_t0_c)
    y_predict_c_at_t0 = Gompertz(a_max_c, estimated_c_c, estimated_t0_c, estimated_t0_c)

    # Fit fatalities with the same scheme
    the_first_one = _train[_train['Fatalities'] > 0.05 * _train['Fatalities'].max()]['Date'].min()
    first_cases = _train[_train['Fatalities'] > 0.37 * _train['Fatalities'].max()]['Date'].min()
    if math.isnan(the_first_one):
        the_first_one = _train['Date'].max()
        first_cases = _train['Date'].max() + 8
    current = _train['Date'].max() - first_cases
    if location.startswith('China'):
        lower = [0, 0.02, 0]
        upper = [2 * y_train_f.max() + 1, 0.15, 25]
    else:
        lower = [0, 0.02, the_first_one]
        upper = [getMultiplier(current) * np.max(y_train_f) + 1, 0.15, first_cases + 28]
    popt_f, pcov_f = curve_fit(Gompertz, x_train, y_train_f, method='trf', bounds=(lower, upper))
    a_max, estimated_c, estimated_t0 = popt_f
    y_predict_f = Gompertz(a_max, estimated_c, x_test, estimated_t0)
    y_predict_f_at_t0 = Gompertz(a_max, estimated_c, estimated_t0, estimated_t0)

    initial_date = datetime(2020, 1, 22)
    dates_train = list(x_train)
    dates_test = list(x_test)
    for i in range(len(dates_train)):
        dates_train[i] = initial_date + timedelta(days=dates_train[i])
    for i in range(len(dates_test)):
        dates_test[i] = initial_date + timedelta(days=dates_test[i])

    # Write the fitted curves into the test rows of df_full
    values = y_predict_c[df_full[df_full['DataType'] == 2]['Date'].astype(int).min():df_full[df_full['DataType'] == 2]['Date'].astype(int).max() + 1]
    df_full.loc[(df_full['DataType'] == 2) & (df_full['Location'] == location), 'ConfirmedCases'] = values
    values = y_predict_f[df_full[df_full['DataType'] == 2]['Date'].astype(int).min():df_full[df_full['DataType'] == 2]['Date'].astype(int).max() + 1]
    df_full.loc[(df_full['DataType'] == 2) & (df_full['Location'] == location), 'Fatalities'] = values
code
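The curve fitted above is the Gompertz growth function; in the notation of the code (a = asymptote, c = growth rate, t_0 = inflection time), in LaTeX:

Q(t) = a \, e^{-e^{-c\,(t - t_0)}}

At the inflection point, Q(t_0) = a/e, and Q(t) approaches the asymptote a as t grows; the bounds passed to curve_fit constrain exactly these three parameters.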
32069353/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
sub_example = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
regr_ensemble_sub = pd.read_csv('/kaggle/input/regressors-ensemble-submission-week-4/submission (4).csv')
regr_ensemble_sub.head()
code
32069353/cell_5
[ "text_plain_output_1.png" ]
import math
import os
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm

plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
PUBLIC_PRIVATE = 1

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
sub_example = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
regr_ensemble_sub = pd.read_csv('/kaggle/input/regressors-ensemble-submission-week-4/submission (4).csv')

# Tag rows by DataType (0 = train, 1 = train/test overlap, 2 = test) and merge
df_train['ForecastId'] = -1
df_train['DataType'] = 0
df_test['DataType'] = 2
df_test['ConfirmedCases'] = -1
df_test['Fatalities'] = -1
df_test['Id'] = df_test['ForecastId'] + df_train['Id'].max()
df_intersection = df_train[df_train['Date'] >= df_test['Date'].min()]
df_intersection['DataType'] = 1
df_intersection['ForecastId'] = df_test[df_test['Date'] <= df_train['Date'].max()]['ForecastId'].values
df_train = df_train[df_train['Date'] < df_test['Date'].min()]
df_test = df_test[df_test['Date'] > df_intersection['Date'].max()]
if not PUBLIC_PRIVATE:
    df_intersection['ConfirmedCases'] = -1
    df_intersection['Fatalities'] = -1
    df_full = pd.concat([df_train, df_intersection, df_test], sort=False, axis=0)
else:
    df_full = pd.concat([df_train, df_intersection, df_test], sort=False, axis=0)

df_full['Province_State'] = df_full['Province_State'].fillna('')
df_full['Location'] = ['_'.join(x) for x in zip(df_full['Country_Region'], df_full['Province_State'])]
df_full.drop(columns=['Province_State', 'Country_Region'], inplace=True)
df_full.shape
code
18114340/cell_6
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# X_train / y_train etc. are created in an earlier notebook cell
regr = LinearRegression()
regr.fit(X_train, y_train)
pred = regr.predict(X_test)
mean_squared_error(pred, y_test)
code
18114340/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import keras
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import *
code
18114340/cell_8
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler  # added: used below but not imported in the original cell

train_df = pd.read_csv('../input/train.csv')
train_df['Test'] = False
test_df = pd.read_csv('../input/test.csv')
test_df['Test'] = True
df = pd.concat([train_df, test_df], sort=False)

# Keep only the columns most correlated with SalePrice
corr = abs(train_df.corr())
price_corr = corr['SalePrice'].sort_values()
columns = list(price_corr[price_corr > 0.51].index)
columns.append('Test')
columns.append('Id')
prepared_data = df[columns].copy()
id_col = df['Id']

# One-hot encode low-cardinality columns (3–10 distinct values)
uniq = prepared_data.apply(lambda x: x.nunique())
idxs = np.array((uniq <= 10) & (uniq > 2))
dummies_columns = prepared_data.iloc[:, idxs].columns
cont_cols = set(prepared_data.columns) - set(dummies_columns)
prepared_data = pd.get_dummies(prepared_data, columns=dummies_columns)
prepared_data.fillna(0, inplace=True)

# Scale continuous columns to [0, 1] by dividing by the column maximum
# (the MinMaxScaler instance from the original is created but never used)
price_multy = prepared_data['SalePrice'].max()
scaler = MinMaxScaler()
prepared_data.loc[:, cont_cols] /= prepared_data.loc[:, cont_cols].max()
prepared_data[['Id', 'Test']] = df[['Id', 'Test']]

train_data = prepared_data.loc[prepared_data['Test'] != True]
train_y = train_data['SalePrice']
train_x = train_data.drop(columns=['SalePrice', 'Id'])
X_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.2, random_state=42)

regr = LinearRegression()
regr.fit(X_train, y_train)
pred = regr.predict(X_test)
mean_squared_error(pred, y_test)

# Predict on the test rows and rescale back to the original price range
# (the original repeated this block verbatim; the duplicate is dropped)
result_df = prepared_data.loc[prepared_data['Test'] == True]
id_col = result_df['Id']
result_df = result_df.drop(columns=['SalePrice', 'Id'])
predictions = regr.predict(result_df)
res_df = pd.DataFrame(predictions * price_multy, columns=['SalePrice'])
res_df['Id'] = id_col
res_df.to_csv('sub.csv', index=None, header=True)
res_df
code
18127692/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

headbrain = pd.read_csv('../input/headbrain.csv')
headbrain = headbrain.values
X = headbrain[:, 2]
Y = headbrain[:, 3]
(X.shape, Y.shape)

# sklearn expects a 2-D feature matrix
X = X.reshape(len(X), 1)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
reg = LinearRegression()
reg.fit(X_train, y_train)
y_predictions = reg.predict(X_test)
print('R-squared :', r2_score(y_test, y_predictions))
code
18127692/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

print('Reading the csv file and looking at the first five rows :\n')
headbrain = pd.read_csv('../input/headbrain.csv')
print(headbrain.head())
code
18127692/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

headbrain = pd.read_csv('../input/headbrain.csv')
print('HeadBrain Info :\n')
print(headbrain.info())
code
18127692/cell_2
[ "text_plain_output_1.png" ]
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

print(os.listdir('../input'))
code
18127692/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.model_selection import train_test_split

headbrain = pd.read_csv('../input/headbrain.csv')
headbrain = headbrain.values
X = headbrain[:, 2]
Y = headbrain[:, 3]
(X.shape, Y.shape)


def Linear_Regression(X, Y):
    # Closed-form least-squares estimates of slope m and intercept c
    mean_x = np.mean(X)
    mean_y = np.mean(Y)
    n = len(X)
    numerator = 0
    denominator = 0
    for i in range(n):
        numerator += (X[i] - mean_x) * (Y[i] - mean_y)
        denominator += (X[i] - mean_x) ** 2
    m = numerator / denominator
    c = mean_y - m * mean_x
    return (m, c)


def predict(X, m, c):
    pred_y = []
    for i in range(len(X)):
        pred_y.append(c + m * X[i])
    return pred_y


def r2score(y_obs, y_pred):
    # Coefficient of determination: 1 - SS_res / SS_tot
    yhat = np.mean(y_obs)
    ss_res = 0.0
    ss_tot = 0.0
    for i in range(len(y_obs)):
        ss_tot += (y_obs[i] - yhat) ** 2
        ss_res += (y_obs[i] - y_pred[i]) ** 2
    r2 = 1 - ss_res / ss_tot
    return r2


plt.title('Linear Regression Plot of HeadSize Vs Brain Weight')
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
m, c = Linear_Regression(X_train, y_train)
print('slope = ', m)
print('intercept = ', c)
y_pred = predict(X_test, m, c)
print('R-squared :', r2score(y_test, y_pred))
plt.plot(X_test, y_pred, color='red', label='Linear Regression')
plt.scatter(X_train, y_train, c='b', label='Scatter Plot')
plt.xlabel('Head Size')
plt.ylabel('Brain Weight')
plt.legend()
plt.show()
code
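The Linear_Regression and r2score helpers above implement the standard closed-form estimates for simple regression; in LaTeX:

m = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sum_i (x_i - \bar{x})^2}, \qquad
c = \bar{y} - m\,\bar{x}, \qquad
R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}

These are exactly the values sklearn's LinearRegression and r2_score compute in the later cell, which is why the two approaches agree up to the random split.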
18127692/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

headbrain = pd.read_csv('../input/headbrain.csv')
print('Checking for any null values:\n')
print(headbrain.isnull().any())
code
18127692/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

headbrain = pd.read_csv('../input/headbrain.csv')
print('Checking for unique values in each column:\n')
print(headbrain.nunique())
code
18127692/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

headbrain = pd.read_csv('../input/headbrain.csv')
plt.figure(figsize=(10, 10))
sns.scatterplot(y='Brain Weight(grams)', x='Head Size(cm^3)', data=headbrain)
plt.show()
code
18127692/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

headbrain = pd.read_csv('../input/headbrain.csv')
headbrain = headbrain.values
X = headbrain[:, 2]
Y = headbrain[:, 3]
(X.shape, Y.shape)
code
18127692/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

headbrain = pd.read_csv('../input/headbrain.csv')
print(headbrain.shape)
code
128012739/cell_4
[ "text_html_output_1.png" ]
from datetime import datetime  # the original's extra `import datetime` would shadow this class and is dropped

import yfinance as yf

ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)
code
128012739/cell_6
[ "text_plain_output_1.png" ]
import time
from datetime import datetime  # the original's extra `import datetime` would shadow this class and is dropped

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_datareader.data as web
import yfinance as yf

ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)

# Retry the download until it succeeds
connected = False
while not connected:
    try:
        ticker_df = web.get_data_yahoo(ticker, start=start_time, end=end_time)
        connected = True
    except Exception as e:
        time.sleep(5)

ticker_df = ticker_df.reset_index()
df = pd.DataFrame(ticker_df)
df = df[df['Volume'] != 0]
df.reset_index(drop=True, inplace=True)
df.isna().sum()
df.tail()
code
128012739/cell_2
[ "text_plain_output_1.png" ]
!pip install yfinance --upgrade --no-cache-dir
!pip install empyrical

import datetime
import time
from datetime import datetime

import empyrical as em
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import plotly.express as px
import plotly.graph_objects as go
import yfinance as yf
from plotly import offline

# from IPython.core.display import display, HTML
# display(HTML("<style>.container { width:100% !important; }</style>"))

pd.core.common.is_list_like = pd.api.types.is_list_like
offline.init_notebook_mode(connected=True)
yf.pdr_override()
print('✔️ Libraries Loaded!')
code
128012739/cell_11
[ "text_plain_output_1.png" ]
from datetime import datetime  # the original's extra `import datetime` would shadow this class and is dropped

import numpy as np  # linear algebra
import pandas as pd
import yfinance as yf

ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)

spy['5_day_MA'] = spy['Close'].rolling(window=5).mean()
spy['30_day_MA'] = spy['Close'].rolling(window=30).mean()

# Backtest of the fixed 5/30-day moving-average crossover strategy
initial_capital = 100000
capital = initial_capital
position = 0
stop_loss = None
for i in range(30, len(spy) - 1):
    pct_change = spy['Close'][i] / spy['Close'][i - 1] - 1
    if spy['5_day_MA'][i] > spy['30_day_MA'][i] and position <= 0:
        position = capital * 0.3
        capital -= position
        stop_loss = spy['Close'][i] * 0.9
    elif spy['5_day_MA'][i] < spy['30_day_MA'][i] and position >= 0:
        position = -capital * 0.2
        capital -= position
        stop_loss = spy['Close'][i] * 1.1
    if position > 0:
        if pct_change >= 0.1:
            additional_position = capital * 0.3
            position += additional_position
            capital -= additional_position
            stop_loss = spy['Close'][i] * 0.9
        elif pct_change >= 0.2:
            additional_position = capital * 0.4
            position += additional_position
            capital -= additional_position
            stop_loss = spy['Close'][i] * 0.9
        elif pct_change <= -0.1:
            capital += position
            position = 0
            stop_loss = None
    elif position < 0:
        if pct_change <= -0.1:
            additional_position = capital * 0.3
            position -= additional_position
            capital += additional_position
            stop_loss = spy['Close'][i] * 1.1
        elif pct_change <= -0.2:
            additional_position = capital * 0.4
            position -= additional_position
            capital += additional_position
            stop_loss = spy['Close'][i] * 1.1
        elif pct_change >= 0.1:
            capital -= position
            position = 0
            stop_loss = None
portfolio_value = capital + position


def strategy(data, short_window, long_window, long_allocation, short_allocation):
    # Simplified crossover strategy used for the parameter grid search
    capital = 100000
    position = 0
    stop_loss = None
    for i in range(long_window, len(data) - 1):
        pct_change = data['Close'][i] / data['Close'][i - 1] - 1
        if data[f'{short_window}_day_MA'][i] > data[f'{long_window}_day_MA'][i] and position <= 0:
            position = capital * long_allocation
            capital -= position
            stop_loss = data['Close'][i] * 0.9
        elif data[f'{short_window}_day_MA'][i] < data[f'{long_window}_day_MA'][i] and position >= 0:
            position = -capital * short_allocation
            capital -= position
            stop_loss = data['Close'][i] * 1.1
    return capital + position


symbol = 'SPY'
start_date = '2020-05-01'
end_date = '2023-05-01'
data = yf.download(symbol, start=start_date, end=end_date)

# Grid search over window lengths and long/short allocations
best_result = 0
best_params = None
short_windows = np.arange(3, 21, 2)
long_windows = np.arange(20, 61, 5)
long_allocations = np.arange(0.1, 0.51, 0.1)
short_allocations = np.arange(0.1, 0.51, 0.1)
for short_window in short_windows:
    for long_window in long_windows:
        if short_window >= long_window:
            continue
        data[f'{short_window}_day_MA'] = data['Close'].rolling(window=short_window).mean()
        data[f'{long_window}_day_MA'] = data['Close'].rolling(window=long_window).mean()
        for long_allocation in long_allocations:
            for short_allocation in short_allocations:
                result = strategy(data, short_window, long_window, long_allocation, short_allocation)
                if result > best_result:
                    best_result = result
                    best_params = (short_window, long_window, long_allocation, short_allocation)
print(f'Best result: {best_result}')
print(f'Best parameters: {best_params}')
code
128012739/cell_8
[ "text_plain_output_1.png" ]
import time
from datetime import datetime

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_datareader.data as web
import plotly.graph_objects as go
import yfinance as yf
from plotly import offline

ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)

connected = False
while not connected:
    try:
        ticker_df = web.get_data_yahoo(ticker, start=start_time, end=end_time)
        connected = True
    except Exception as e:
        time.sleep(5)

ticker_df = ticker_df.reset_index()
df = pd.DataFrame(ticker_df)
df = df[df['Volume'] != 0]
df.reset_index(drop=True, inplace=True)
df.isna().sum()

# Candlestick chart with 5- and 30-day moving-average overlays
# (the duplicated trace definitions from the original are collapsed)
ma5 = df['Close'].rolling(window=5).mean()
ma30 = df['Close'].rolling(window=30).mean()
trace_ma5 = go.Scatter(x=df.Date, y=ma5, name='5-day Moving Average', line=dict(color='blue'))
trace_ma30 = go.Scatter(x=df.Date, y=ma30, name='30-day Moving Average', line=dict(color='orange'))
trace = go.Candlestick(x=df.Date, open=df.Open, high=df.High, low=df.Low, close=df.Close)
chart_data = [trace, trace_ma30, trace_ma5]
layout = go.Layout(title='SPDR S&P 500 ETF Trust (SPY) with Moving Average', width=1800, height=900)
figure = go.Figure(data=chart_data, layout=layout)
offline.iplot(figure)
code
128012739/cell_10
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import datetime
from datetime import datetime  # keep the from-import last so `datetime` names the class, not the module
import yfinance as yf
ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)
spy = yf.download(ticker, start=start_time, end=end_time)
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy['5_day_MA'] = spy['Close'].rolling(window=5).mean()
spy['30_day_MA'] = spy['Close'].rolling(window=30).mean()
initial_capital = 100000
capital = initial_capital
position = 0
stop_loss = None
for i in range(30, len(spy) - 1):
    pct_change = spy['Close'][i] / spy['Close'][i - 1] - 1
    if spy['5_day_MA'][i] > spy['30_day_MA'][i] and position <= 0:
        position = capital * 0.3
        capital -= position
        stop_loss = spy['Close'][i] * 0.9
    elif spy['5_day_MA'][i] < spy['30_day_MA'][i] and position >= 0:
        position = -capital * 0.2
        capital -= position
        stop_loss = spy['Close'][i] * 1.1
    if position > 0:
        # test the larger move first so both thresholds are reachable
        if pct_change >= 0.2:
            additional_position = capital * 0.4
            position += additional_position
            capital -= additional_position
            stop_loss = spy['Close'][i] * 0.9
        elif pct_change >= 0.1:
            additional_position = capital * 0.3
            position += additional_position
            capital -= additional_position
            stop_loss = spy['Close'][i] * 0.9
        elif pct_change <= -0.1:
            capital += position
            position = 0
            stop_loss = None
    elif position < 0:
        # same ordering fix on the short side
        if pct_change <= -0.2:
            additional_position = capital * 0.4
            position -= additional_position
            capital += additional_position
            stop_loss = spy['Close'][i] * 1.1
        elif pct_change <= -0.1:
            additional_position = capital * 0.3
            position -= additional_position
            capital += additional_position
            stop_loss = spy['Close'][i] * 1.1
        elif pct_change >= 0.1:
            capital -= position
            position = 0
            stop_loss = None
portfolio_value = capital + position
print(f'Initial capital: {initial_capital}')
print(f'Final portfolio value: {portfolio_value}')
code
128012739/cell_5
[ "text_html_output_1.png" ]
import datetime
from datetime import datetime  # keep the from-import last so `datetime` names the class, not the module
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_datareader.data as web
import time
import yfinance as yf
ticker = 'SPY'
start_time = datetime(2020, 5, 1)
end_time = datetime(2023, 5, 1)
spy = yf.download(ticker, start=start_time, end=end_time)
connected = False
while not connected:
    try:
        ticker_df = web.get_data_yahoo(ticker, start=start_time, end=end_time)
        connected = True
        print('connected to yahoo')
    except Exception as e:
        print('type error: ' + str(e))
        time.sleep(5)
        pass
ticker_df = ticker_df.reset_index()
df = pd.DataFrame(ticker_df)
print(ticker_df)
code
74072152/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
plt.figure(figsize=(10, 7))
sns.heatmap(df.corr(), annot=True, linewidths=0.2, fmt='.1f', cmap='coolwarm')
plt.show()
code
74072152/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
print(df.Outcome.value_counts())
code
74072152/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model = model.fit(X_train, y_train)
score = model.predict(X_train)
pred = model.predict(X_test)
model.coef_
code
74072152/cell_23
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
outcome_column = df['Outcome']
outcome_column.head()
code
74072152/cell_33
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model = model.fit(X_train, y_train)
score = model.predict(X_train)
pred = model.predict(X_test)
print('Model predictions: ', pred)  # pred holds class labels, not an accuracy score
code
74072152/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
plt.figure(figsize=(10, 5))
sns.barplot('Outcome', 'Pregnancies', data=df)
plt.title('Bars of Outcome and Pregnancies')
plt.xlabel('Outcome')
plt.ylabel('Pregnancies')
plt.show()
code
74072152/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.head()
code
74072152/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74072152/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.info()
code
74072152/cell_32
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model = model.fit(X_train, y_train)
score = model.predict(X_train)
print('Training Score: ', model.score(X_train, y_train))
print('Testing Score: ', model.score(X_test, y_test))
code
74072152/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
featureList = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
featureList = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
print(df[featureList].isin({0}).sum())
code
74072152/cell_35
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
model = LogisticRegression()
model = model.fit(X_train, y_train)
score = model.predict(X_train)
pred = model.predict(X_test)
accuracy_score(y_test, pred)
code
74072152/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
featureList = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
print(df[featureList].isin({0}).sum())
code
74072152/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
feature_columns = df[['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']]
feature_columns.head()
code
74072152/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
code
74072152/cell_27
[ "image_output_1.png" ]
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
code
74072152/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/diabetes-prediction-using-logistic-regression/diabetes-dataset.csv')
df.describe
df.isna().sum()
code
106205967/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
preds, targs = learn.get_preds()
code
106205967/cell_23
[ "text_html_output_2.png" ]
import numpy as np
targs = targs.numpy()
preds = np.argmax(preds.numpy(), axis=-1)
print(preds[0:3])
print(targs[0:3])
code
106205967/cell_33
[ "text_html_output_3.png" ]
import numpy as np
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
targs = targs.numpy()
preds = np.argmax(preds.numpy(), axis=-1)
tpreds2 = np.argmax(tpreds, axis=-1)
print(testY)
print(tpreds2)
code
106205967/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
display(data[0:3].T)
display(data.info())
display(data.select_dtypes(include='object').columns.tolist())
display(data.select_dtypes(include='float64').columns.tolist())
code
106205967/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
preds, targs = learn.get_preds()
learn.show_results()
code
106205967/cell_26
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
preds, targs = learn.get_preds()
learn.recorder.plot_sched()
code
106205967/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
print(len(train), len(test))
testY = test['class'].map(normal_mapping)
code
106205967/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
code
106205967/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
data['class'] = data['cost'].apply(lambda x: ('00' + str(int(x // 10)))[-2:])
display(data['class'])
code
106205967/cell_18
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
code
106205967/cell_32
[ "image_output_1.png" ]
import numpy as np
targs = targs.numpy()
preds = np.argmax(preds.numpy(), axis=-1)
print(tpreds[0:3])
tpreds2 = np.argmax(tpreds, axis=-1)
print(tpreds2[0:3])
code
106205967/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
print(Name)
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
print(normal_mapping)
code
106205967/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
code
106205967/cell_31
[ "image_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
preds, targs = learn.get_preds()
tst_dl = learn.dls.test_dl(test)
tpreds, _ = learn.get_preds(dl=tst_dl)
code
106205967/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
code
106205967/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
display(preds)
display(targs)
code
106205967/cell_10
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
data = data.drop('cost', axis=1)
m = len(data)
print(m)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
code
106205967/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
dls = TabularPandas(train, splits=splits, cat_names=['food_category', 'food_department', 'food_family', 'promotion_name', 'sales_country', 'marital_status', 'gender', 'education', 'member_card', 'occupation', 'houseowner', 'avg. yearly_income', 'brand_name', 'store_type', 'store_city', 'store_state', 'media_type'], cont_names=['store_sales(in millions)', 'store_cost(in millions)', 'unit_sales(in millions)', 'total_children', 'avg_cars_at home(approx)', 'num_children_at_home', 'avg_cars_at home(approx).1', 'SRP', 'gross_weight', 'net_weight', 'recyclable_package', 'low_fat', 'units_per_case', 'store_sqft', 'grocery_sqft', 'frozen_sqft', 'meat_sqft', 'coffee_bar', 'video_store', 'salad_bar', 'prepared_food', 'florist'], y_names='class', y_block=CategoryBlock(), procs=[Categorify, FillMissing, Normalize]).dataloaders(path='.')
dls
learn = tabular_learner(dls, layers=[200, 100], metrics=accuracy)
learn
learn.lr_find(suggest_funcs=(valley, slide))
learn.fit_one_cycle(10, 0.01)
preds, targs = learn.get_preds()
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
code
106205967/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
from fastai.tabular.all import *
pd.options.display.float_format = '{:.2f}'.format
set_seed(42)
data = pd.read_csv('../input/medias-cost-prediction-in-foodmart/media prediction and its cost.csv')
Name = sorted(data['class'].unique().tolist())
N = list(range(len(Name)))
normal_mapping = dict(zip(Name, N))
reverse_mapping = dict(zip(N, Name))
data = data.drop('cost', axis=1)
m = len(data)
M = list(range(m))
random.seed(2021)
random.shuffle(M)
train = data.iloc[M[0:m // 4 * 3]]
test = data.iloc[M[m // 4 * 3:]]
testY = test['class'].map(normal_mapping)
splits = RandomSplitter(seed=42)(train)
display(splits)
print(len(splits[0]), len(splits[1]))
code
33104580/cell_4
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33104580/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
print('Number of counties: ' + str(df.index.nunique()))
code
33104580/cell_11
[ "text_plain_output_1.png" ]
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.figure(figsize=(10, 5))
sns.distplot(df.likely_infected_high, hist=True, kde=False, color='red', hist_kws={'edgecolor': 'black', 'linewidth': 1}, kde_kws={'linewidth': 2})
print('Summary Statistic of Percentage of Population Likely Infected across counties: \n')
print(df.likely_infected_high.describe())
plt.xlim(0, 1)
plt.title('Distribution of county population likely infected')
plt.xlabel('Percentage of population likely infected')
plt.show()
code
33104580/cell_19
[ "text_plain_output_1.png" ]
from datetime import datetime, timedelta
from urllib.request import urlopen
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.express as px
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.xlim(0, 1)

def show_values_on_bars(axs, h_v="v", space=0.4, text_size=10):
    def _show_on_single_plot(ax):
        if h_v == "v":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = p.get_height()
                ax.text(_x, _y, value, ha="center", size=text_size)
        elif h_v == "h":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + float(space)
                _y = p.get_y() + p.get_height() - float(0.2)
                value = p.get_width()
                value = "{:.1%}".format(value)
                ax.text(_x, _y, value, ha="left", size=text_size)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)

plt.figure(figsize=(10, 10))
g = sns.barplot(x='likely_infected_low', y='county_state', data=df.sort_values(['likely_infected_low'], ascending=False).head(20), color="lightgreen")
show_values_on_bars(g, "h", space=0.01, text_size=10)
plt.xlim(0, 1.1)
plt.xlabel("Percentage of population infected")
plt.ylabel("County, State")
plt.title("Likely spread of Virus if spread is 28 fold")
import plotly.express as px
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)
fig = px.choropleth(df, geojson=counties, locations='county_fips', color='likely_infected_low', color_continuous_scale='Reds', range_color=(0, 0.2), scope='usa', title='Percentage of population likely already infected if spread is 28 folds', hover_name='county_state', hover_data=['confirmed', 'deaths'], labels={'likely_infected_low': '% Likely Infected', 'confirmed': 'Confirmed Cases ', 'deaths': 'Deaths '})
fig.update_layout(margin={'r': 0, 't': 30, 'l': 0, 'b': 0})
fig.layout.template = None
config = dict({'scrollZoom': False})
fig.write_html('1.html')
fig = px.choropleth(df, geojson=counties, locations='county_fips', color='likely_infected_high', color_continuous_scale='Reds', range_color=(0, 0.2), scope='usa', title='Percentage of population likely already infected if spread is 80 folds', hover_name='county_state', hover_data=['confirmed', 'deaths'], labels={'likely_infected_high': '% Likely Infected', 'confirmed': 'Confirmed Cases ', 'deaths': 'Deaths '})
fig.update_layout(margin={'r': 0, 't': 30, 'l': 0, 'b': 0})
fig.layout.template = None
config = dict({'scrollZoom': False})
fig.show(config=config)
fig.write_html('2.html')
code
33104580/cell_7
[ "image_output_1.png" ]
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
df.head()
code
33104580/cell_8
[ "text_html_output_1.png" ]
!pip install chart_studio
!pip install plotly-geo
code
33104580/cell_17
[ "text_html_output_1.png" ]
from datetime import datetime, timedelta
from urllib.request import urlopen
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.express as px
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.xlim(0, 1)

def show_values_on_bars(axs, h_v="v", space=0.4, text_size=10):
    def _show_on_single_plot(ax):
        if h_v == "v":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = p.get_height()
                ax.text(_x, _y, value, ha="center", size=text_size)
        elif h_v == "h":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + float(space)
                _y = p.get_y() + p.get_height() - float(0.2)
                value = p.get_width()
                value = "{:.1%}".format(value)
                ax.text(_x, _y, value, ha="left", size=text_size)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)

plt.figure(figsize=(10, 10))
g = sns.barplot(x='likely_infected_low', y='county_state', data=df.sort_values(['likely_infected_low'], ascending=False).head(20), color="lightgreen")
show_values_on_bars(g, "h", space=0.01, text_size=10)
plt.xlim(0, 1.1)
plt.xlabel("Percentage of population infected")
plt.ylabel("County, State")
plt.title("Likely spread of Virus if spread is 28 fold")
import plotly.express as px
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)
fig = px.choropleth(df, geojson=counties, locations='county_fips', color='likely_infected_low', color_continuous_scale='Reds', range_color=(0, 0.2), scope='usa', title='Percentage of population likely already infected if spread is 28 folds', hover_name='county_state', hover_data=['confirmed', 'deaths'], labels={'likely_infected_low': '% Likely Infected', 'confirmed': 'Confirmed Cases ', 'deaths': 'Deaths '})
fig.update_layout(margin={'r': 0, 't': 30, 'l': 0, 'b': 0})
fig.layout.template = None
config = dict({'scrollZoom': False})
fig.show(config=config)
fig.write_html('1.html')
code
33104580/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.xlim(0, 1)

def show_values_on_bars(axs, h_v="v", space=0.4, text_size=10):
    def _show_on_single_plot(ax):
        if h_v == "v":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = p.get_height()
                ax.text(_x, _y, value, ha="center", size=text_size)
        elif h_v == "h":
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + float(space)
                _y = p.get_y() + p.get_height() - float(0.2)
                value = p.get_width()
                value = "{:.1%}".format(value)
                ax.text(_x, _y, value, ha="left", size=text_size)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)

plt.figure(figsize=(10, 10))
g = sns.barplot(x='likely_infected_low', y='county_state', data=df.sort_values(['likely_infected_low'], ascending=False).head(20), color="lightgreen")
show_values_on_bars(g, "h", space=0.01, text_size=10)
plt.xlim(0, 1.1)
plt.xlabel("Percentage of population infected")
plt.ylabel("County, State")
plt.title("Likely spread of Virus if spread is 28 fold")
df_temp = df[df.confirmed > 500]
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
g = sns.barplot(x='mortality', y='county_state', data=df[df.confirmed > 500].sort_values(['mortality'], ascending=False).head(10), color='red')
show_values_on_bars(g, 'h', space=0.002, text_size=20)
plt.xlim(0, 0.2)
plt.xlabel('Covid Mortality Rate', size=20)
plt.ylabel(' ', size=20)
plt.yticks(size=15)
plt.title('Counties with highest Covid Mortality', size=25)
plt.subplot(1, 2, 2)
g = sns.barplot(x='mortality', y='county_state', data=df[df.confirmed > 500].sort_values(['mortality'], ascending=True).head(10), color='blue')
show_values_on_bars(g, 'h', space=0.002, text_size=20)
plt.xlim(0, 0.05)
plt.xlabel('Covid Mortality Rate', size=20)
plt.ylabel(' ')
plt.yticks(size=15)
plt.title('Counties with lowest Covid Mortality', size=25)
plt.tight_layout()
code
33104580/cell_14
[ "text_plain_output_1.png" ]
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.xlim(0, 1)

def show_values_on_bars(axs, h_v='v', space=0.4, text_size=10):
    def _show_on_single_plot(ax):
        if h_v == 'v':
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = p.get_height()
                ax.text(_x, _y, value, ha='center', size=text_size)
        elif h_v == 'h':
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + float(space)
                _y = p.get_y() + p.get_height() - float(0.2)
                value = p.get_width()
                value = '{:.1%}'.format(value)
                ax.text(_x, _y, value, ha='left', size=text_size)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)

plt.figure(figsize=(10, 10))
g = sns.barplot(x='likely_infected_low', y='county_state', data=df.sort_values(['likely_infected_low'], ascending=False).head(20), color='lightgreen')
show_values_on_bars(g, 'h', space=0.01, text_size=10)
plt.xlim(0, 1.1)
plt.xlabel('Percentage of population infected')
plt.ylabel('County, State')
plt.title('Likely spread of Virus if spread is 28 fold')
code
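# Hedged aside, not a cell from the notebook: a minimal usage sketch of the
# show_values_on_bars helper defined above, on toy data. The county names and
# percentages below are hypothetical, not taken from the USAFacts feed, and the
# sketch assumes the helper (plus pandas/seaborn/matplotlib) is already in scope.
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

toy = pd.DataFrame({'county_state': ['Alpha County, XX', 'Beta County, YY'],
                    'likely_infected_low': [0.42, 0.17]})
ax = sns.barplot(x='likely_infected_low', y='county_state', data=toy, color='lightgreen')
show_values_on_bars(ax, 'h', space=0.01, text_size=10)  # writes '42.0%' and '17.0%' beside the bars
plt.xlim(0, 1.1)
plt.show()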
33104580/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import os
from urllib.request import urlopen
import json
import plotly.express as px
import plotly.offline as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')
# Yesterday's date, formatted to match the m/d/yy column names in the USAFacts CSVs
latest_date = datetime.today() - timedelta(days=1)
latest_date = latest_date.strftime('%m/%d/%y')[1:]
# Latest confirmed cases, deaths and population per county, keyed by FIPS code
df_cases = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv')[['countyFIPS', 'County Name', 'State', latest_date]]
df_cases = df_cases.rename(columns={'countyFIPS': 'county_fips', latest_date: 'confirmed'}).set_index('county_fips')
df_deaths = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv')[['countyFIPS', latest_date]]
df_deaths = df_deaths.rename(columns={'countyFIPS': 'county_fips', latest_date: 'deaths'}).set_index('county_fips')
df_pop = pd.read_csv('https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_county_population_usafacts.csv')[['countyFIPS', 'population']]
df_pop = df_pop.rename(columns={'countyFIPS': 'county_fips'}).set_index('county_fips')
# Join the three sources and drop unallocated (FIPS <= 999) or zero-population rows
df = df_cases.join(df_deaths)
df = df.join(df_pop)
df = df[df.index > 999]
df = df[df.population > 0]
del df_cases, df_deaths, df_pop
# Derived per-county metrics
df['mortality'] = df['deaths'] / df['confirmed']
df['mortality'] = df['mortality'].fillna(0)
df['deaths_per_million'] = df['deaths'] * 1000000 / df['population']
df['cases_per_million'] = df['confirmed'] * 1000000 / df['population']
# Likely infected share under an 80x (high) and 28x (low) undercount multiplier,
# capped at 100% of the county population
df['likely_infected_high'] = np.round(df['confirmed'] * 80 / df['population'], 2)
df['likely_infected_high'] = np.clip(df['likely_infected_high'], 0, 1)
df['likely_infected_low'] = np.round(df['confirmed'] * 28 / df['population'], 2)
df['likely_infected_low'] = np.clip(df['likely_infected_low'], 0, 1)
df['county_state'] = df['County Name'] + ', ' + df['State']
# County health-ranking covariates, joined on the same FIPS index
df_county_stats = pd.read_csv('/kaggle/input/uncover/county_health_rankings/county_health_rankings/us-county-health-rankings-2020.csv')[['fips', 'segregation_index', 'percent_black', 'median_household_income', 'percent_adults_with_obesity', 'percent_smokers', 'percent_with_access_to_exercise_opportunities', 'percent_some_college', 'percent_unemployed', 'percent_children_in_poverty']]
df_county_stats = df_county_stats.rename(columns={'fips': 'county_fips', 'segregation_index': 'segregation_level'}).set_index('county_fips')
df = df.join(df_county_stats)
df_county_stats = pd.read_csv('/kaggle/input/county-ranking-data/county_ranking.csv')[['fipscode', 'v052_rawvalue', 'v053_rawvalue', 'v044_rawvalue', 'v147_rawvalue', 'v002_cilow', 'v136_other_data_2']]
df_county_stats = df_county_stats.rename(columns={'fipscode': 'county_fips', 'v052_rawvalue': 'percent_below_18', 'v053_rawvalue': 'percent_above_65', 'v044_rawvalue': 'income_inequality', 'v147_rawvalue': 'life_expectancy', 'v002_cilow': 'poor_fair_health', 'v136_other_data_2': 'over_crowding'}).set_index('county_fips')
df = df.join(df_county_stats)
df = df.reset_index()
df['county_fips'] = df['county_fips'].astype(str).str.rjust(5, '0')
plt.xlim(0, 1)

def show_values_on_bars(axs, h_v='v', space=0.4, text_size=10):
    # Annotate each bar of a matplotlib/seaborn bar plot with its value;
    # h_v selects vertical ('v') or horizontal ('h') bars
    def _show_on_single_plot(ax):
        if h_v == 'v':
            for p in ax.patches:
                _x = p.get_x() + p.get_width() / 2
                _y = p.get_y() + p.get_height()
                value = p.get_height()
                ax.text(_x, _y, value, ha='center', size=text_size)
        elif h_v == 'h':
            for p in ax.patches:
                _x = p.get_x() + p.get_width() + float(space)
                _y = p.get_y() + p.get_height() - float(0.2)
                value = p.get_width()
                value = '{:.1%}'.format(value)
                ax.text(_x, _y, value, ha='left', size=text_size)
    if isinstance(axs, np.ndarray):
        for idx, ax in np.ndenumerate(axs):
            _show_on_single_plot(ax)
    else:
        _show_on_single_plot(axs)

plt.figure(figsize=(10, 10))
g = sns.barplot(x='likely_infected_low', y='county_state', data=df.sort_values(['likely_infected_low'], ascending=False).head(20), color='lightgreen')
show_values_on_bars(g, 'h', space=0.01, text_size=10)
plt.xlim(0, 1.1)
plt.xlabel('Percentage of population infected')
plt.ylabel('County, State')
plt.title('Likely spread of Virus if spread is 28 fold')
# Histogram of mortality rate over counties with more than 500 confirmed cases
# (distplot is deprecated in newer seaborn; histplot is the modern equivalent)
plt.figure(figsize=(10, 5))
df_temp = df[df.confirmed > 500]
sns.distplot(df_temp.mortality, hist=True, kde=False, color='green', hist_kws={'edgecolor': 'black', 'linewidth': 1}, kde_kws={'linewidth': 2})
plt.title('Distribution of Mortality Rate')
plt.xlabel('Covid Mortality Rate')
code
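# Hedged aside, not a cell from the notebook: a toy illustration of why the
# mortality histogram above filters to counties with more than 500 confirmed
# cases — deaths / confirmed is dominated by noise at small counts. All numbers
# below are made up.
import pandas as pd

toy = pd.DataFrame({'confirmed': [3, 1200], 'deaths': [1, 36]})
toy['mortality'] = toy['deaths'] / toy['confirmed']
print(toy)  # ~0.333 for the 3-case county vs 0.030 for the 1200-case county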
130007697/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130007697/cell_18
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt
import torch
import cv2 as cv
import matplotlib.pyplot as plt
# Sanity-check the annotated plate box for Cars0.png: draw the rectangle and its centre
img = cv.imread('/kaggle/input/car-plate-detection/images/Cars0.png')
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
rec = cv.rectangle(img, (226, 125), (419, 173), (0, 250, 0), 2)
rec = cv.circle(rec, ((226 + 419) // 2, (125 + 173) // 2), 2, (255, 0, 0), 2)
# Load the fine-tuned YOLOv5 weights and run inference on a held-out test image
import torch
yolo = torch.hub.load('ultralytics/yolov5', 'custom', path='/kaggle/working/yolov5/runs/train/exp/weights/best.pt')
img = '/kaggle/input/number-plate-detection/TEST/TEST.jpeg'
results = yolo(img)
coordinates = results.xyxy[0][:, :-1]  # box corners + confidence, class id column dropped
results.pandas().xyxy[0]
code
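# Hypothetical follow-up sketch, not part of the notebook: crop the first
# detected plate using the xyxy boxes produced above. Assumes `results` and the
# TEST.jpeg path from the previous cell are in scope; the output filename is
# made up for illustration.
import cv2 as cv

det = results.xyxy[0]
if len(det):
    x1, y1, x2, y2 = map(int, det[0][:4])  # corner coordinates of the top detection
    img_bgr = cv.imread('/kaggle/input/number-plate-detection/TEST/TEST.jpeg')
    plate = img_bgr[y1:y2, x1:x2]
    cv.imwrite('plate_crop.jpg', plate)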
130007697/cell_16
[ "text_plain_output_1.png" ]
import torch
import torch
# Reload the fine-tuned YOLOv5 model from the best checkpoint saved during training
yolo = torch.hub.load('ultralytics/yolov5', 'custom', path='/kaggle/working/yolov5/runs/train/exp/weights/best.pt')
code