Dataset columns:
- path: string (lengths 13–17)
- screenshot_names: sequence (lengths 1–873)
- code: string (lengths 0–40.4k)
- cell_type: string (1 class)
16144426/cell_19
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import accuracy_score

model = RandomForestClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
test_pred = model.predict(xtest)
accuracy_score(ytest, test_pred)
code
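The cell above trains on xtrain/ytrain splits that are created in cells not captured in this trace. A minimal, self-contained sketch of the usual setup with train_test_split, on synthetic stand-in data (all data and names here are illustrative, not the notebook's):

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Toy data standing in for the notebook's document vectors and labels.
X, y = make_classification(n_samples=500, n_features=20, random_state=42)
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)

model = RandomForestClassifier(n_estimators=800)
model.fit(xtrain, ytrain)
print(accuracy_score(ytest, model.predict(xtest)))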
16144426/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import gensim
import nltk

print(os.listdir('../input/embeddings/GoogleNews-vectors-negative300/'))
code
16144426/cell_7
[ "text_plain_output_1.png" ]
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
print(doc1)
print(nltk.word_tokenize(doc1.lower()))
code
16144426/cell_28
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

# Average the word vectors of each cleaned review into one document vector.
docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape

pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()

url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
docs = docs.str.lower().str.replace('[^a-z ]', '')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [stemmer.stem(word) for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.head()
code
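The averaging loop above builds each document vector by appending rows to a DataFrame, which is slow and relies on DataFrame.append, removed in pandas 2.0. A sketch of the same mean-pooling idea with NumPy, assuming embeddings is a loaded gensim KeyedVectors and docs_clean a Series of cleaned strings as in the cell above:

import numpy as np
import pandas as pd

def doc_vector(doc, embeddings, dim=300):
    # Average the vectors of all in-vocabulary tokens; zeros if none match.
    vecs = [embeddings[w] for w in doc.split() if w in embeddings]
    return np.mean(vecs, axis=0) if vecs else np.zeros(dim)

# Usage against the notebook's objects (names assumed from the cell above):
# docs_vectors = pd.DataFrame([doc_vector(d, embeddings) for d in docs_clean])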
16144426/cell_8
[ "text_plain_output_1.png" ]
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
docs.head()
code
16144426/cell_15
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
docs_vectors.head()
code
16144426/cell_16
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape

pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()
code
16144426/cell_3
[ "text_html_output_1.png" ]
import gensim

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
list(embeddings['modi'][:5])
code
16144426/cell_31
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()

# Drop the two documents whose vectors came out all-NaN.
X = docs_vectors.drop([64, 590])
Y = data['sentiment'].drop([64, 590])

url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
Y = data['Sentiment_Manual']
Y.value_counts()
docs = docs.str.lower().str.replace('[^a-z ]', '')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [stemmer.stem(word) for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
X = docs_clean
(X.shape, Y.shape)

cv = CountVectorizer(min_df=5)
cv.fit(X)
code
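A self-contained sketch of what the fitted CountVectorizer produces, on a tiny stand-in corpus (min_df=1 only because the corpus is so small; get_feature_names_out assumes scikit-learn >= 1.0):

from sklearn.feature_extraction.text import CountVectorizer

corpus = ['great movie great cast', 'terrible movie', 'great fun']
cv = CountVectorizer(min_df=1)
X_bow = cv.fit_transform(corpus)    # sparse document-term matrix
print(cv.get_feature_names_out())   # learned vocabulary
print(X_bow.toarray())              # per-document term counts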
16144426/cell_24
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()

url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
print(docs.shape)
docs.head()
code
16144426/cell_14
[ "text_html_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
code
16144426/cell_22
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()

url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
code
16144426/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary
temp
code
16144426/cell_27
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.shape

docs_vectors = pd.DataFrame()
for doc in docs_clean:
    words = nltk.word_tokenize(doc)
    temp = pd.DataFrame()
    for word in words:
        try:
            word_vec = embeddings[word]
            temp = temp.append(pd.Series(word_vec), ignore_index=True)
        except:
            pass
    docs_vectors = docs_vectors.append(temp.mean(), ignore_index=True)
docs_vectors.shape
pd.isnull(docs_vectors).sum(axis=1).sort_values(ascending=False).head()

url = 'https://bit.ly/2W21FY7'
data = pd.read_csv(url)
data.shape
docs = data.loc[:, 'Lower_Case_Reviews']
docs = docs.str.lower().str.replace('[^a-z ]', '')
docs.head()
code
16144426/cell_12
[ "text_plain_output_1.png" ]
from nltk.stem import PorterStemmer
import gensim
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
pd.Series(embeddings['modi'][:5])
embeddings.most_similar('modi', topn=10)

url = 'https://bit.ly/2S2yXEd'
data = pd.read_csv(url)
doc1 = data.iloc[0, 0]
docs = data['review']
words = nltk.word_tokenize(doc1.lower())
temp = pd.DataFrame()
for word in words:
    try:
        temp = temp.append(pd.Series(embeddings[word][:5]), ignore_index=True)
    except:
        pass  # word not in the embedding vocabulary

docs = docs.str.lower().str.replace('[^a-z ]', '')
stemmer = PorterStemmer()
stopwords = nltk.corpus.stopwords.words('english')

def clean_doc(doc):
    words = doc.split(' ')
    words_clean = [word for word in words if word not in stopwords]
    doc_clean = ' '.join(words_clean)
    return doc_clean

docs_clean = docs.apply(clean_doc)
docs_clean.head()
code
16144426/cell_5
[ "text_plain_output_1.png" ]
import gensim

path = '../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin'
embeddings = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
embeddings.most_similar('modi', topn=10)
code
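most_similar ranks the vocabulary by cosine similarity to the query word's vector. A small self-contained sketch of that underlying computation with NumPy (toy two-dimensional vectors, not the GoogleNews embeddings):

import numpy as np

def cosine(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

vocab = {'delhi': np.array([0.9, 0.1]), 'cricket': np.array([0.1, 0.9])}
query = np.array([0.8, 0.2])
ranked = sorted(vocab, key=lambda w: cosine(vocab[w], query), reverse=True)
print(ranked)  # ['delhi', 'cricket']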
16123290/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

df_test_set.head()
code
16123290/cell_9
[ "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)
iplot(fig)
code
16123290/cell_4
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)
iplot(fig)
code
16123290/cell_20
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

df_all = pd.concat([df_test_set, df_train_set], sort=False)
start_date = '2014-01-01'
end_date = '2014-01-31'
fig, ax = plt.subplots(1)
df_all[['PJME_MW', 'MW_Prediction']].plot(ax=ax, figsize=(15, 5), style=['.'])
ax.set_xbound(lower=start_date, upper=end_date)
code
16123290/cell_29
[ "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

df_all = pd.concat([df_test_set, df_train_set], sort=False)

start_date = '2014-01-01'
end_date = '2014-01-31'
fig, ax = plt.subplots(1)
df_all[['PJME_MW', 'MW_Prediction']].plot(ax=ax, figsize=(15, 5), style=['.'])
ax.set_xbound(lower=start_date, upper=end_date)

start_date = '2015-02-20 00:00:00'
end_date = '2015-02-20 23:00:00'
fig, ax = plt.subplots(1)
df_all[['PJME_MW', 'MW_Prediction']].plot(ax=ax, figsize=(15, 5), style=['.'])
ax.set_xbound(lower=start_date, upper=end_date)
code
16123290/cell_26
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import numpy as np
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

# 'Error' is created in an earlier cell not captured in this trace.
df_test_set['AbsError'] = df_test_set.Error.apply(np.abs)
day_groupby = df_test_set.groupby(['year', 'month', 'dayofmonth'])
error_by_day = day_groupby[['PJME_MW', 'MW_Prediction', 'Error', 'AbsError']].mean()
error_by_day.sort_values(ascending=True, by='AbsError').head(15)
code
16123290/cell_11
[ "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']
code
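Series.dt.weekofyear, used in AddDateProperties above, was removed in pandas 2.0. A sketch of the same calendar features against current pandas, computed directly from a DatetimeIndex (toy hourly index, not the PJME data):

import pandas as pd

idx = pd.date_range('2014-01-01', periods=48, freq='h')
features = pd.DataFrame(index=idx)
features['hour'] = idx.hour
features['dayofweek'] = idx.dayofweek
features['quarter'] = idx.quarter
features['month'] = idx.month
features['year'] = idx.year
features['dayofyear'] = idx.dayofyear
features['dayofmonth'] = idx.day
features['weekofyear'] = idx.isocalendar().week.values  # replaces .weekofyear
print(features.head())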
16123290/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.metrics import mean_squared_error, mean_absolute_error
import plotly.plotly as py
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go

init_notebook_mode(connected=True)
print(os.listdir('../input'))
code
16123290/cell_7
[ "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]
df_test_set.head(2)
code
16123290/cell_18
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

df_all = pd.concat([df_test_set, df_train_set], sort=False)
df_all[['PJME_MW', 'MW_Prediction']].plot(figsize=(15, 5))
code
16123290/cell_32
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from xgboost import plot_importance, plot_tree
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import xgboost as xgb

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

model = xgb.XGBRegressor(learning_rate=0.01, n_estimators=1000, max_depth=3, subsample=0.8, colsample_bylevel=1)
model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], early_stopping_rounds=50, verbose=False)
df_test_set['MW_Prediction'] = model.predict(X_test)
df_all = pd.concat([df_test_set, df_train_set], sort=False)

start_date = '2014-01-01'
end_date = '2014-01-31'
fig, ax = plt.subplots(1)
df_all[['PJME_MW', 'MW_Prediction']].plot(ax=ax, figsize=(15, 5), style=['.'])
ax.set_xbound(lower=start_date, upper=end_date)

start_date = '2015-02-20 00:00:00'
end_date = '2015-02-20 23:00:00'
fig, ax = plt.subplots(1)
df_all[['PJME_MW', 'MW_Prediction']].plot(ax=ax, figsize=(15, 5), style=['.'])
ax.set_xbound(lower=start_date, upper=end_date)

plot_tree(model, num_trees=1, rankdir='LR')
plt.show()
plt.rcParams['figure.figsize'] = (100, 70)
code
16123290/cell_28
[ "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import numpy as np
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

# 'Error' is created in an earlier cell not captured in this trace.
df_test_set['AbsError'] = df_test_set.Error.apply(np.abs)
day_groupby = df_test_set.groupby(['year', 'month', 'dayofmonth'])
error_by_day = day_groupby[['PJME_MW', 'MW_Prediction', 'Error', 'AbsError']].mean()
error_by_day.sort_values(ascending=True, by='AbsError').head(15)
error_by_day.sort_values(ascending=False, by='AbsError').head(15)
code
16123290/cell_8
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)
iplot(fig)
code
16123290/cell_15
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from xgboost import plot_importance, plot_tree
import pandas as pd
import plotly.graph_objs as go
import xgboost as xgb

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

model = xgb.XGBRegressor(learning_rate=0.01, n_estimators=1000, max_depth=3, subsample=0.8, colsample_bylevel=1)
model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], early_stopping_rounds=50, verbose=False)
plot_importance(model)
code
16123290/cell_3
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])
df.plot(figsize=(15, 8))
code
16123290/cell_31
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import numpy as np
import pandas as pd
import plotly.graph_objs as go

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

# 'Error' is created in an earlier cell not captured in this trace.
df_test_set['AbsError'] = df_test_set.Error.apply(np.abs)
day_groupby = df_test_set.groupby(['year', 'month', 'dayofmonth'])
error_by_day = day_groupby[['PJME_MW', 'MW_Prediction', 'Error', 'AbsError']].mean()
error_by_day.sort_values(ascending=True, by='AbsError').head(15)
error_by_day.sort_values(ascending=False, by='AbsError').head(15)
error_by_day.sort_values(ascending=True, by='Error').head(15)
code
16123290/cell_14
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go
import xgboost as xgb

df = pd.read_csv('../input/PJME_hourly.csv', index_col=[0], parse_dates=[0])

trace1 = go.Scatter(x=df.index, y=df.PJME_MW)
data = [trace1]
fig = dict(data=data)

splitdate = '2014-01-01'
df_train_set = df[df.index < splitdate]
df_test_set = df[df.index > splitdate]

trace1 = go.Scatter(x=df_train_set.index, y=df_train_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

trace1 = go.Scatter(x=df_test_set.index, y=df_test_set.PJME_MW)
data = [trace1]
fig = dict(data=data)

def AddDateProperties(df):
    df['date'] = df.index
    df['hour'] = df['date'].dt.hour
    df['dayofweek'] = df['date'].dt.dayofweek
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    df['dayofyear'] = df['date'].dt.dayofyear
    df['dayofmonth'] = df['date'].dt.day
    df['weekofyear'] = df['date'].dt.weekofyear
    return df

df_test_set = AddDateProperties(df_test_set)
df_train_set = AddDateProperties(df_train_set)
df_test_set = df_test_set.drop(['date'], axis=1)
df_train_set = df_train_set.drop(['date'], axis=1)

y_test = df_test_set['PJME_MW']
y_train = df_train_set['PJME_MW']
X_test = df_test_set.loc[:, df_test_set.columns != 'PJME_MW']
X_train = df_train_set.loc[:, df_train_set.columns != 'PJME_MW']

model = xgb.XGBRegressor(learning_rate=0.01, n_estimators=1000, max_depth=3, subsample=0.8, colsample_bylevel=1)
model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], early_stopping_rounds=50, verbose=False)
code
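cell_1 of this notebook imports mean_squared_error and mean_absolute_error; a sketch of scoring held-out predictions with them, on toy stand-in arrays rather than the notebook's y_test and model.predict(X_test). Note also that with recent xgboost (>= 2.0), early_stopping_rounds is passed to the XGBRegressor constructor rather than to fit.

import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Toy stand-ins for y_test and the model's test predictions.
y_true = np.array([30000.0, 32000.0, 31000.0])
y_pred = np.array([29500.0, 32500.0, 30800.0])

mae = mean_absolute_error(y_true, y_pred)
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
print(f'MAE={mae:.1f} MW, RMSE={rmse:.1f} MW')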
73070243/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data[['Pclass', 'Survived']].groupby('Pclass').mean()
code
73070243/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.head()
code
73070243/cell_57
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess = np.zeros(5)
guess
for dataset in concatenate:
    for i in range(1, 6):
        dataset.loc[dataset['Age'].isnull() & (dataset['Title'] == i), 'Age'] = guess[i - 1]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in concatenate:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 1
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 3
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 4
    dataset.loc[dataset['Age'] > 64, 'Age'] = 5

train_data = train_data.drop('Age_Group', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['Family'] == 1, 'IsAlone'] = 1

train_data[['IsAlone', 'Survived']].groupby(['IsAlone']).mean()
code
73070243/cell_33
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\\.', expand=False)

pd.crosstab(train_data['Title'], train_data['Sex'])
code
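A self-contained illustration of that str.extract call: the pattern captures the run of letters immediately before a period, which in the Titanic names is the honorific.

import pandas as pd

names = pd.Series([
    'Braund, Mr. Owen Harris',
    'Cumings, Mrs. John Bradley',
    'Heikkinen, Miss. Laina',
])
print(names.str.extract(r'([A-Za-z]+)\.', expand=False))
# -> Mr, Mrs, Miss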
73070243/cell_44
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

train_data.head()
code
73070243/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

v = sns.FacetGrid(train_data, col='Survived')
v.map(plt.hist, 'Age', bins=10, color='red')
code
73070243/cell_55
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess = np.zeros(5)
guess
for dataset in concatenate:
    for i in range(1, 6):
        dataset.loc[dataset['Age'].isnull() & (dataset['Title'] == i), 'Age'] = guess[i - 1]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in concatenate:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 1
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 3
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 4
    dataset.loc[dataset['Age'] > 64, 'Age'] = 5

train_data = train_data.drop('Age_Group', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Family'] = dataset['SibSp'] + dataset['Parch'] + 1

train_data[['Family', 'Survived']].groupby(['Family']).mean().sort_values(by='Survived', ascending=False)
code
73070243/cell_39
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
train_data.head()
code
73070243/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

v = sns.FacetGrid(train_data, col='Survived')
v.map(plt.hist, 'Age', bins=10, color='red')

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(plt.hist, 'Age', bins=10, color='green')
v.add_legend()

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(sns.scatterplot, 'Age', 'Sex', color='orange')
v.add_legend()

v = sns.FacetGrid(train_data, row='Embarked')
v.map(sns.pointplot, 'Pclass', 'Survived', 'Sex')
v.add_legend()
code
73070243/cell_48
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

guess = np.zeros(5)
guess
for dataset in concatenate:
    for i in range(1, 6):
        dataset.loc[dataset['Age'].isnull() & (dataset['Title'] == i), 'Age'] = guess[i - 1]
        print(guess[i - 1])
    dataset['Age'] = dataset['Age'].astype(int)
code
73070243/cell_41
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
train_data.info()
code
73070243/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_data['Title'], train_data['Sex'])

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

train_data['Age_Group'] = pd.cut(train_data['Age'], 5)
train_data[['Age_Group', 'Survived']].groupby('Age_Group').mean()
code
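pd.cut with an integer argument slices the age range into equal-width bins. A small self-contained example of the intervals it produces (toy ages, not the Titanic column):

import pandas as pd

ages = pd.Series([2, 15, 25, 38, 47, 63, 80])
print(pd.cut(ages, 5))
# Five equal-width intervals spanning 2..80,
# e.g. (1.922, 17.6] < (17.6, 33.2] < ... < (64.4, 80.0]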
73070243/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73070243/cell_45
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra

guess = np.zeros(5)
guess
code
73070243/cell_51
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess = np.zeros(5)
guess
for dataset in concatenate:
    for i in range(1, 6):
        dataset.loc[dataset['Age'].isnull() & (dataset['Title'] == i), 'Age'] = guess[i - 1]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in concatenate:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 1
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 3
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 4
    dataset.loc[dataset['Age'] > 64, 'Age'] = 5

train_data.head()
code
73070243/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

v = sns.FacetGrid(train_data, col='Survived')
v.map(plt.hist, 'Age', bins=10, color='red')

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(plt.hist, 'Age', bins=10, color='green')
v.add_legend()

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(sns.scatterplot, 'Age', 'Sex', color='orange')
v.add_legend()

v = sns.FacetGrid(train_data, row='Embarked')
v.map(sns.pointplot, 'Pclass', 'Survived', 'Sex')
v.add_legend()

v = sns.FacetGrid(train_data, col='Survived')
v.map(sns.barplot, 'Fare')
v.add_legend()
code
73070243/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

print(train_data.columns)
print(train_data.info())
print(test_data.info())
code
73070243/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data[['Sex', 'Survived']].groupby('Sex').mean()
code
73070243/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data[['SibSp', 'Survived']].groupby('SibSp').mean().sort_values(by='Survived', ascending=False)
code
73070243/cell_47
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)

train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]

guess = np.zeros(5)
guess
# Collect the median age of each title group as the imputation value.
for dataset in concatenate:
    for i in range(1, 6):
        gg = dataset[dataset['Title'] == i]['Age'].dropna()
        ages = gg.median()
        guess[i - 1] = ages
    print(guess)
code
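The loop above gathers per-title median ages into guess for later imputation. A more idiomatic pandas sketch of the same per-group median fill, on a toy frame (Title/Age columns assumed, as in the cells above):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'Title': [1, 1, 2, 2, 3, 3],
    'Age':   [22.0, np.nan, 35.0, 33.0, 28.0, np.nan],
})
# Fill each missing Age with the median Age of the same Title group.
df['Age'] = df['Age'].fillna(df.groupby('Title')['Age'].transform('median'))
print(df)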
73070243/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.head()
code
73070243/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data[['Parch', 'Survived']].groupby('Parch').mean().sort_values(by='Survived', ascending=False)
code
73070243/cell_35
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]

train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]

# Normalize rare and variant honorifics before encoding.
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Sir', 'Mr')
    dataset['Title'] = dataset['Title'].replace(['Col', 'Don', 'Rev', 'Dr', 'Major', 'Lady', 'Capt', 'Countess', 'Jonkheer', 'Dona'], 'uncommon')

train_data[['Title', 'Survived']].groupby('Title').mean().sort_values(by='Survived', ascending=False)
code
73070243/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

v = sns.FacetGrid(train_data, col='Survived')
v.map(plt.hist, 'Age', bins=10, color='red')

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(plt.hist, 'Age', bins=10, color='green')
v.add_legend()

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(sns.scatterplot, 'Age', 'Sex', color='orange')
v.add_legend()
code
73070243/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')

v = sns.FacetGrid(train_data, col='Survived')
v.map(plt.hist, 'Age', bins=10, color='red')

v = sns.FacetGrid(train_data, col='Survived', row='Pclass')
v.map(plt.hist, 'Age', bins=10, color='green')
v.add_legend()
code
73070243/cell_53
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]
train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]
category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)
train_data = train_data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
concatenate = [train_data, test_data]
for dataset in concatenate:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
# Impute missing ages with the per-title median guesses (computed in an earlier cell)
guess = np.zeros(5)
guess
for dataset in concatenate:
    for i in range(1, 6):
        dataset.loc[dataset['Age'].isnull() & (dataset['Title'] == i), 'Age'] = guess[i - 1]
    dataset['Age'] = dataset['Age'].astype(int)
# Bucket Age into five ordinal bands
for dataset in concatenate:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 1
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 3
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 4
    dataset.loc[dataset['Age'] > 64, 'Age'] = 5
train_data = train_data.drop('Age_Group', axis=1)  # 'Age_Group' was created in an earlier cell
concatenate = [train_data, test_data]
train_data.head()
code
73070243/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.describe(include=['O'])
code
73070243/cell_37
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
concatenate = [train_data, test_data]
train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis=1)
concatenate = [train_data, test_data]
category = {'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'uncommon': 5}
for dataset in concatenate:
    dataset['Title'] = dataset['Title'].map(category)
train_data.head()
code
1005662/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn import tree
print(check_output(['ls', '../input']).decode('utf8'))
code
1005662/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
combine = [train_data, test_data]
print(train_data.columns.values)
print(train_data.head())
print(train_data.describe())
train_data.shape
code
17136141/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
print('Max year :', max_year)
print('Min year :', min_year)
code
17136141/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
suicide_df.plot(x='generation', y='suicides_no', linestyle='', marker='o')
code
17136141/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.info()
code
17136141/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
suicide_df.groupby('year')['suicides_no'].sum().plot(kind='bar', figsize=(15, 10), cmap='summer')
code
17136141/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
code
17136141/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
suicide_df.groupby('age')['suicides_no'].sum().plot(kind='bar', cmap='rainbow')
code
17136141/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
sns.catplot(x='country', y='population', hue='age', data=suicide_df)
code
17136141/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
suicide_df.groupby('sex')['suicides_no'].sum().plot(kind='bar', cmap='RdBu')
code
17136141/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
df = suicide_df[['country', 'suicides_no']]
df1 = df.groupby('country').sum()
df1 = df1.sort_values(by='suicides_no', ascending=False).reset_index()
df1 = df1.loc[df1['suicides_no'] > 1000]
df1.head()
code
17136141/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.head()
code
17136141/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
df = suicide_df[['country', 'suicides_no']]
df1 = df.groupby('country').sum()
df1 = df1.sort_values(by='suicides_no', ascending=False).reset_index()
df1 = df1.loc[df1['suicides_no'] > 1000]
plt.figure(figsize=(15, 20))
sns.barplot(x='suicides_no', y='country', data=df1)
code
17136141/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
df = suicide_df[['country', 'suicides_no']]
df1 = df.groupby('country').sum()
df1 = df1.sort_values(by='suicides_no', ascending=False).reset_index()
df1 = df1.loc[df1['suicides_no'] > 1000]
plt.figure(figsize=(10, 6))
sns.countplot(x='generation', hue='sex', data=suicide_df)
code
17136141/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
pop = suicide_df[['country', 'population', 'suicides_no']]
pop.head()
code
17136141/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.isnull().any()
suicide_df = suicide_df.drop(['HDI for year', 'country-year', 'gdp_per_capita ($)'], axis=1)
min_year = min(suicide_df.year)
max_year = max(suicide_df.year)
df = suicide_df[['country', 'suicides_no']]
df.head()
code
17136141/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
suicide_df = pd.read_csv('../input/master.csv')
suicide_df.describe()
code
73067872/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df
code
73067872/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.Survived
code
73067872/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df
code
73067872/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/titanic/train.csv')
df.info()
code
32062482/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
apple_mobility_df = pd.read_csv('../input/apple-mobility-trends-updated-daily/Apple_Mobility_2020-04-13.csv')
apple_mobility_df.drop('Unnamed: 0', axis=1, inplace=True)
geo_mask = apple_mobility_df['geo_type'] == 'country/region'
mobility_countries = apple_mobility_df[geo_mask]
mobility_cities = apple_mobility_df[~geo_mask]

def get_trans_count(df):
    # Count rows per transportation type, labelled with the frame's geo_type
    name = df['geo_type'].iloc[0]
    return df['transportation_type'].value_counts().rename(str(name))

transport_types_count = pd.concat([get_trans_count(mobility_countries), get_trans_count(mobility_cities)], axis=1, sort=False)
transport_types_count
code
32062482/cell_6
[ "text_html_output_2.png" ]
import pandas as pd
import plotly.express as px
apple_mobility_df = pd.read_csv('../input/apple-mobility-trends-updated-daily/Apple_Mobility_2020-04-13.csv')
apple_mobility_df.drop('Unnamed: 0', axis=1, inplace=True)
geo_mask = apple_mobility_df['geo_type'] == 'country/region'
mobility_countries = apple_mobility_df[geo_mask]
mobility_cities = apple_mobility_df[~geo_mask]
# Reshape from one column per date to long format for plotting
mobility_countries_melted = mobility_countries.melt(id_vars=['geo_type', 'region', 'transportation_type', 'lat', 'lng', 'population'], var_name='Date', value_name='pct_of_baseline')
mobility_cities_melted = mobility_cities.melt(id_vars=['geo_type', 'region', 'transportation_type', 'lat', 'lng', 'population'], var_name='Date', value_name='pct_of_baseline')
to_show = ['Atlanta', 'Athens']
df = mobility_cities_melted[mobility_cities_melted['region'].isin(to_show)]
fig = px.line(df, x='Date', y='pct_of_baseline', color='transportation_type', line_group='region', hover_name='region')
fig.show()
code
32062482/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
apple_mobility_df = pd.read_csv('../input/apple-mobility-trends-updated-daily/Apple_Mobility_2020-04-13.csv')
apple_mobility_df.drop('Unnamed: 0', axis=1, inplace=True)
apple_mobility_df.head()
code
32062482/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
apple_mobility_df = pd.read_csv('../input/apple-mobility-trends-updated-daily/Apple_Mobility_2020-04-13.csv')
apple_mobility_df.drop('Unnamed: 0', axis=1, inplace=True)
geo_mask = apple_mobility_df['geo_type'] == 'country/region'
mobility_countries = apple_mobility_df[geo_mask]
mobility_cities = apple_mobility_df[~geo_mask]
print('There are a total of {} countries and {} cities with provided mobility data.'.format(len(mobility_countries), len(mobility_cities)))
code
32062482/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
apple_mobility_df = pd.read_csv('../input/apple-mobility-trends-updated-daily/Apple_Mobility_2020-04-13.csv')
apple_mobility_df.drop('Unnamed: 0', axis=1, inplace=True)
geo_mask = apple_mobility_df['geo_type'] == 'country/region'
mobility_countries = apple_mobility_df[geo_mask]
mobility_cities = apple_mobility_df[~geo_mask]
mobility_countries_melted = mobility_countries.melt(id_vars=['geo_type', 'region', 'transportation_type', 'lat', 'lng', 'population'], var_name='Date', value_name='pct_of_baseline')
mobility_cities_melted = mobility_cities.melt(id_vars=['geo_type', 'region', 'transportation_type', 'lat', 'lng', 'population'], var_name='Date', value_name='pct_of_baseline')
mobility_cities_melted.head()
code
106211205/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
data.columns
code
106211205/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
code
106211205/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data
code
106211205/cell_6
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.describe()
code
106211205/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
data.columns
columns = ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']
sns.pairplot(data)
code
106211205/cell_7
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
code
106211205/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
data.columns
columns = ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']
for i in columns:
    plt.figure()
    sns.kdeplot(data[i], hue=data['Gender'], shade=True)
code
106211205/cell_15
[ "text_plain_output_1.png" ]
columns = ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']
columns
code
106211205/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
data.columns
columns = ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']
for i in columns:
    plt.figure()
    sns.distplot(data[i])
code
106211205/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.dtypes
data.isnull().sum()
sns.distplot(data['Annual Income (k$)'])
code
322326/cell_2
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
sns.countplot(x='outcome', data=act_df)
plt.show()  # call pyplot directly; sns.plt is not part of the seaborn API
code
322326/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
sns.countplot(x='activity_category', data=act_df, hue='outcome')
plt.show()  # call pyplot directly; sns.plt is not part of the seaborn API
code
32068059/cell_4
[ "text_plain_output_1.png" ]
!pip install scispacy
!pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.2.4/en_core_sci_lg-0.2.4.tar.gz
!jupyter nbextension enable --py --sys-prefix widgetsnbextension
code