Dataset schema:
- path: string (length 13 to 17)
- screenshot_names: sequence of strings (1 to 873 items)
- code: string (length 0 to 40.4k)
- cell_type: string (1 class)
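Each record below follows this schema. As a minimal sketch of how records with these four fields could be inspected once materialized on disk (not part of the original dataset tooling; the file name cells.jsonl and the JSON Lines format are assumptions for illustration only), they might be loaded and grouped with pandas:

import pandas as pd

# Hypothetical file; the actual storage format of this dump is not specified here.
records = pd.read_json('cells.jsonl', lines=True)

# Keep only cells that produced at least one image output.
with_images = records[records['screenshot_names'].apply(
    lambda names: any(n.startswith('image_output') for n in names))]

# The notebook id is the part of `path` before the slash, e.g. '128046727'.
per_notebook = with_images.groupby(
    with_images['path'].str.split('/').str[0]).size()
print(per_notebook)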
128046727/cell_52
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'Other diseases diagnosys grouped by sex', data_df, 'other', size=2)
code
128046727/cell_45
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'Normal diagnosys grouped by sex', data_df, 'normal', size=2)
code
128046727/cell_49
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'AMD diagnosys grouped by sex', data_df, 'amd', size=2)
code
128046727/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('myopia', 'Myopia', data_df, size=2)
code
128046727/cell_51
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'Myopia diagnosys grouped by sex', data_df, 'myopia', size=2)
code
128046727/cell_59
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import glob import imageio import matplotlib.pyplot as plt import os import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() import imageio IMAGE_PATH = "/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/Training Images" def show_images(df, title="Diagnosys", eye_exam="left_fundus"): print(f"{title}; eye exam: {eye_exam}") f, ax = plt.subplots(4,4, figsize=(16,16)) for i,idx in enumerate(df.index): dd = df.iloc[idx] image_name = dd[eye_exam] image_path = os.path.join(IMAGE_PATH, image_name) img_data=imageio.imread(image_path) ax[i//4, i%4].imshow(img_data) ax[i//4, i%4].axis('off') plt.show() df = data_df.loc[(data_df.cataract == 1) & (data_df.left_diagnosys == 'cataract')].sample(16).reset_index() df = data_df.loc[(data_df.cataract == 1) & (data_df.right_diagnosys == 'cataract')].sample(16).reset_index() df = data_df.loc[(data_df.glaucoma == 1) & (data_df.left_diagnosys == 'glaucoma')].sample(16).reset_index() show_images(df, title='Left eye with glaucoma', eye_exam='left_fundus')
code
128046727/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('glaucoma', 'Glaucoma', data_df, size=2)
code
128046727/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']
data_df.head()
code
128046727/cell_47
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'Glaucoma diagnosys grouped by sex', data_df, 'glaucoma', size=2)
code
128046727/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']
print(f'data shape: {data_df.shape}')
print(f'left fundus: {data_df.left_fundus.nunique()}')
print(f'right fundus: {data_df.right_fundus.nunique()}')
code
128046727/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('left_diagnosys', 'Left eye diagnosys (first 20 values)', data_df, size=4)
code
128046727/cell_43
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] show_wordcloud(cataract_df['right_diagnosys'], title='Prevalent words in right eye diagnosys for cataract')
code
128046727/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('hypertension', 'Hypertension', data_df, size=2)
code
128046727/cell_46
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() stopwords = set(STOPWORDS) def show_wordcloud(data, title = None): wordcloud = WordCloud( background_color='white', stopwords=stopwords, max_words=40, max_font_size=40, scale=3, random_state=1, ).generate(str(data)) fig = plt.figure(1, figsize=(12,10)) plt.axis('off') if title: fig.suptitle(title, fontsize=20) fig.subplots_adjust(top=2.3) plt.imshow(wordcloud) plt.show() cataract_df = data_df.loc[data_df.cataract == 1] def plot_feature_distribution_grouped(feature, title, df, hue, size=4): plt.figure(figsize=(size*5,size*2)) plt.title(title) if(size > 2): plt.xticks(rotation=90, size=8) g = sns.countplot(df[feature], hue=df[hue], palette='Set3') plt.xlabel(feature) plt.legend() plt.show() plot_feature_distribution_grouped('sex', 'Diabetes diagnosys grouped by sex', data_df, 'diabetes', size=2)
code
128046727/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('age', 'Age', data_df, size=5, show_all=True)
code
128046727/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('diabetes', 'Diabetes', data_df, size=2)
code
128046727/cell_5
[ "image_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    print(dirname)
code
128046727/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl') data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other'] def plot_count(feature, title, df, size=1, show_all=False): f, ax = plt.subplots(1,1, figsize=(4*size,4)) total = float(len(df)) if show_all: g = sns.countplot(df[feature], palette='Set3') g.set_title("{} distribution".format(title)) else: g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3') if(size > 2): plt.xticks(rotation=90, size=8) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center") g.set_title("Number and percentage of {}".format(title)) plt.show() plot_count('right_diagnosys', 'Right eye diagnosys (first 20 values)', data_df, size=4)
code
106190796/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
round(df.isna().sum() / df.shape[0] * 100, 2)
code
106190796/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.head()
code
106190796/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
code
106190796/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
sns.countplot(df.Transported)
code
106190796/cell_18
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
round(df.isna().sum() / df.shape[0] * 100, 2)
df.drop(['PassengerId', 'Cabin', 'Name'], axis=1, inplace=True)
df.Transported = df.Transported.astype('str')
le = LabelEncoder()
for col in df.select_dtypes(include='object'):
    df[col] = le.fit_transform(df[col])
KNNImpute = KNNImputer()
newdf = KNNImpute.fit_transform(df)
df = pd.DataFrame(newdf, columns=df.columns)

# Outliers Detection
fig, ax = plt.subplots(figsize=(15, 10))
sns.boxplot(data=df, ax=ax, palette='Reds')
plt.show()

plt.figure(figsize=(10, 7))
sns.heatmap(df.corr(), cmap='Reds', annot=True)
code
106190796/cell_15
[ "text_html_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
round(df.isna().sum() / df.shape[0] * 100, 2)
df.drop(['PassengerId', 'Cabin', 'Name'], axis=1, inplace=True)
df.Transported = df.Transported.astype('str')
le = LabelEncoder()
for col in df.select_dtypes(include='object'):
    df[col] = le.fit_transform(df[col])
KNNImpute = KNNImputer()
newdf = KNNImpute.fit_transform(df)
df = pd.DataFrame(newdf, columns=df.columns)
fig, ax = plt.subplots(figsize=(15, 10))
sns.boxplot(data=df, ax=ax, palette='Reds')
plt.show()
code
106190796/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
code
106190796/cell_17
[ "text_html_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
round(df.isna().sum() / df.shape[0] * 100, 2)
df.drop(['PassengerId', 'Cabin', 'Name'], axis=1, inplace=True)
df.Transported = df.Transported.astype('str')
le = LabelEncoder()
for col in df.select_dtypes(include='object'):
    df[col] = le.fit_transform(df[col])
KNNImpute = KNNImputer()
newdf = KNNImpute.fit_transform(df)
df = pd.DataFrame(newdf, columns=df.columns)
df.describe()
code
106190796/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.shape
round(df.isna().sum() / df.shape[0] * 100, 2)
for col in df.select_dtypes(include='object'):
    print(f'{col} -- > {df[col].unique()} \nTotal Unique Values --> {df[col].nunique()}\n-----------------------------------')
code
106190796/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.sample(5)
df.describe(percentiles=[i / 10 for i in range(1, 10)])
code
34136379/cell_25
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) print(scaler.fit(df))
code
34136379/cell_34
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) scaled_df = scaler.transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) awesome_song = pd.DataFrame(data=None, columns=df.columns) awesome_song = awesome_song.append(pd.Series(), ignore_index=True) awesome_song awesome_song.iloc[0] = df.iloc[0] awesome_song.columns
code
34136379/cell_23
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) df.head()
code
34136379/cell_30
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) scaled_df = scaler.transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) awesome_song = pd.DataFrame(data=None, columns=df.columns) awesome_song = awesome_song.append(pd.Series(), ignore_index=True) awesome_song
code
34136379/cell_33
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) scaled_df = scaler.transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) awesome_song = pd.DataFrame(data=None, columns=df.columns) awesome_song = awesome_song.append(pd.Series(), ignore_index=True) awesome_song awesome_song.iloc[0] = df.iloc[0] awesome_song
code
34136379/cell_19
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts()
code
34136379/cell_28
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) scaled_df = scaler.transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) scaled_df.head()
code
34136379/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1')
df.sort_values(by=['Popularity'], ascending=False)
df['ArtisT_Name'].unique()
code
34136379/cell_43
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 df.dtypes.value_counts() df = pd.get_dummies(df) df = df.reindex(sorted(df.columns), axis=1) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(0, 1)) scaled_df = scaler.transform(df) scaled_df = pd.DataFrame(scaled_df, columns=df.columns) awesome_song = pd.DataFrame(data=None, columns=df.columns) awesome_song = awesome_song.append(pd.Series(), ignore_index=True) awesome_song awesome_song.iloc[0] = df.iloc[0] awesome_song.columns awesome_song = awesome_song.reindex(sorted(awesome_song.columns), axis=1) awesome_song = scaler.transform(awesome_song) awesome_song = pd.DataFrame(awesome_song, columns=df.columns) awesome_song
code
34136379/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import statsmodels.formula.api as smf df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1') df.sort_values(by=['Popularity'], ascending=False) import seaborn as sns corr = df.corr() ax = sns.heatmap( corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True ) import statsmodels.formula.api as smf import statsmodels.stats.multicomp as multi def uni_analysis(df): x = df.select_dtypes(include=['O']) p_value = [] for i in x: para = 'Popularity ~ ' + str(i) model = smf.ols(formula=para, data=df) results = model.fit() p_value.append(results.f_pvalue) df1 = pd.DataFrame(list(zip(x, p_value)), columns=['Variable', 'p_value']) df1['Drop_column'] = df1['p_value'].apply(lambda x: 'True' if x > 0.05 else 'False') return df1 uni_analysis(df)
code
34136379/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1')
df.sort_values(by=['Popularity'], ascending=False)
import seaborn as sns
corr = df.corr()
ax = sns.heatmap(corr, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(20, 220, n=200), square=True)
code
34136379/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-50-spotify/top50.csv', encoding='latin-1')
df.sort_values(by=['Popularity'], ascending=False)
code
128008136/cell_13
[ "text_plain_output_1.png" ]
!pip install -q -U segmentation-models-pytorch albumentations > /dev/null
import segmentation_models_pytorch as smp
code
128008136/cell_6
[ "text_plain_output_1.png" ]
import os
import pandas as pd

DATA_DIR = '/kaggle/input/cvcclinicdb'
metadata_df = pd.read_csv(os.path.join(DATA_DIR, 'metadata.csv'))
metadata_df = metadata_df[['frame_id', 'png_image_path', 'png_mask_path']]
metadata_df['png_image_path'] = metadata_df['png_image_path'].apply(lambda img_pth: os.path.join(DATA_DIR, img_pth))
metadata_df['png_mask_path'] = metadata_df['png_mask_path'].apply(lambda img_pth: os.path.join(DATA_DIR, img_pth))
metadata_df = metadata_df.sample(frac=1).reset_index(drop=True)
valid_df = metadata_df.sample(frac=0.1, random_state=42)
train_df = metadata_df.drop(valid_df.index)
(len(train_df), len(valid_df))
code
128008136/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd

DATA_DIR = '/kaggle/input/cvcclinicdb'
metadata_df = pd.read_csv(os.path.join(DATA_DIR, 'metadata.csv'))
metadata_df = metadata_df[['frame_id', 'png_image_path', 'png_mask_path']]
metadata_df['png_image_path'] = metadata_df['png_image_path'].apply(lambda img_pth: os.path.join(DATA_DIR, img_pth))
metadata_df['png_mask_path'] = metadata_df['png_mask_path'].apply(lambda img_pth: os.path.join(DATA_DIR, img_pth))
metadata_df = metadata_df.sample(frac=1).reset_index(drop=True)
valid_df = metadata_df.sample(frac=0.1, random_state=42)
train_df = metadata_df.drop(valid_df.index)
(len(train_df), len(valid_df))
class_dict = pd.read_csv(os.path.join(DATA_DIR, 'class_dict.csv'))
class_names = class_dict['class_names'].tolist()
class_rgb_values = class_dict[['r', 'g', 'b']].values.tolist()
print('All dataset classes and their corresponding RGB values in labels:')
print('Class Names: ', class_names)
print('Class RGB values: ', class_rgb_values)
code
128008136/cell_15
[ "text_plain_output_1.png" ]
!pip install torchsummary
from torchsummary import summary

summary(model, (3, 288, 384))
code
331059/cell_4
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra

def logloss_err(y, p):
    N = len(p)
    err = -1 / N * np.sum(y * np.log(p))
    return err

p = np.array([[0.2, 0.8], [0.7, 0.3]])
y = np.array([[0, 1], [1, 0]])
logloss_err(y, p)
code
331059/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
import numpy as np  # linear algebra
import os  # joining filepath
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import LabelEncoder  # used to encode phone brands below

def logloss_err(y, p):
    N = len(p)
    err = -1 / N * np.sum(y * np.log(p))
    return err

p = np.array([[0.2, 0.8], [0.7, 0.3]])
y = np.array([[0, 1], [1, 0]])
logloss_err(y, p)

datadir = '../input'
gatrain = pd.read_csv(os.path.join(datadir, 'gender_age_train.csv'), index_col='device_id')
gatest = pd.read_csv(os.path.join(datadir, 'gender_age_test.csv'), index_col='device_id')
phone = pd.read_csv(os.path.join(datadir, 'phone_brand_device_model.csv'))
phone = phone.drop_duplicates('device_id', keep='first').set_index('device_id')
events = pd.read_csv(os.path.join(datadir, 'events.csv'), parse_dates=['timestamp'], index_col='event_id')
appevents = pd.read_csv(os.path.join(datadir, 'app_events.csv'), usecols=['event_id', 'app_id', 'is_active'], dtype={'is_active': bool})
applabels = pd.read_csv(os.path.join(datadir, 'app_labels.csv'))
gatrain['trainrow'] = np.arange(gatrain.shape[0])
gatest['testrow'] = np.arange(gatest.shape[0])  # row index for the test set, mirroring 'trainrow' above
brandencoder = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = brandencoder.transform(phone['phone_brand'])
gatrain['brand'] = phone['brand']
gatest['brand'] = phone['brand']
Xtr_brand = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand)))
Xte_brand = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand)))
print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
code
32068117/cell_21
[ "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from cleantext import clean
from sentence_transformers import SentenceTransformer
from summarizer import Summarizer
from tqdm import tqdm
import faiss
import numpy as np
import os
import pandas as pd
from sentence_transformers import SentenceTransformer
import faiss
from tqdm import tqdm
from summarizer import Summarizer
import scipy.spatial
import pandas as pd
import numpy as np
import re
import json
import time
import os
from cleantext import clean

def filter_text(text):
    text = text.lower()
    text = text.replace(',', ' ').replace('-', ' ').replace('_', ' ').replace('/', ' ').replace('“', ' ').replace('"', ' ').replace("'", ' ').replace(';', ' ').replace(':', ' ')
    text = text.replace(',', ' ').replace('-', ' ').replace('_', ' ').replace('/', ' ').replace('“', ' ').replace('"', ' ').replace("'", ' ')
    text = text.replace('.', ' ').replace(')', ' ').replace('(', ' ').replace('#', ' ').replace('в®', ' ').replace('®', ' ')
    text = text.replace('$', ' ').replace('*', ' ').replace('”', ' ').replace('+', ' ').replace('&', ' ').replace('>', ' ').replace('=', ' ')
    text = text.replace('[', ' ').replace(']', ' ').replace('?', ' ').replace('\\', ' ').replace('<', ' ')
    text = ' '.join([w for w in text.split() if len(w) > 1])
    text = ' '.join(text.lower().split()[:30])
    return text

def getIndex(corpus, ids):
    index = []
    index2 = []
    index = faiss.IndexFlatIP(corpus.shape[1])
    faiss.normalize_L2(corpus)
    index.train(corpus)
    index2 = faiss.IndexIDMap(index)
    index2.add_with_ids(corpus, ids)
    return index2

def getEmbed(train, embedder):
    corpus = train.astype(str).values.tolist()
    df_embeddings = pd.DataFrame(embedder.encode(corpus), index=train.index)
    df_model = pd.concat([df_embeddings, train], axis=1, join='inner').reindex(train.index)
    return df_model

def writeModel(df_model, index_file_prefix, info_file_prefix):
    corpus_embeddings = np.ascontiguousarray(df_model.filter(regex='\\d+', axis=1)).astype('float32')
    index = getIndex(corpus_embeddings, df_model.index.values)
    faiss.write_index(index, index_file_prefix + df_model.columns[-1])
    df_model.iloc[:, -1].to_json(info_file_prefix + df_model.columns[-1] + '.json')
    return

def writeModelBody(df_model, df_info, index_file_prefix, info_file_prefix):
    corpus_embeddings = np.ascontiguousarray(df_model.filter(regex='\\d+', axis=1)).astype('float32')
    index = getIndex(corpus_embeddings, df_model.index.values)
    faiss.write_index(index, index_file_prefix + df_model.columns[-1])
    df_info.to_json(info_file_prefix + df_model.columns[-1] + '.json')
    return

def prepTitle(Tdata):
    data = Tdata.dropna(subset=['title'])
    data['title'] = data['title'].apply(lambda x: clean(x, no_urls=True, no_emails=True, no_phone_numbers=True))
    data['title'] = data['title'].apply(filter_text)
    data = data.drop_duplicates(subset=['title'])
    return data.title

def prepAbstract(Adata):
    data = Adata.dropna(subset=['abstract'])
    data['abstract'] = data['abstract'].apply(lambda x: clean(x, no_urls=True, no_emails=True, no_phone_numbers=True))
    data['abstract'] = data['abstract'].apply(filter_text)
    data = data.drop_duplicates(subset=['abstract'])
    return data.abstract

def prepBody(Adata, embedder):
    model = Summarizer(model='distilbert-base-uncased')
    tests = pd.DataFrame()
    data = Adata.dropna(subset=['text'])
    for index, row in tqdm(data.iterrows(), total=data.shape[0]):
        ggg = row.text.strip('][').split(', ')
        ct = [len(i.split()) for i in ggg]
        kkk = row.key.strip('][').split(', ')
        kkkk = [int(x.strip("'")) for x in kkk]
        hh = [row.name] * len(kkkk)
        data1 = pd.DataFrame({'text': ggg, 'paraIndex': kkkk, 'paperIndex': hh, 'paraCount': ct})
        tests = tests.append(data1)
    tests = tests.reset_index(drop=True)
    sub1 = tests
    sub1_a = sub1.iloc[250000:275000, :]
    sub = sub1_a
    tqdm.pandas()
    sub['body'] = sub['text'].progress_apply(lambda x: model(clean(x, no_urls=True, no_emails=True, no_phone_numbers=True)))
    df_embed_abs = getEmbed(sub['body'], embedder)
    sub.to_json('body_summary_11.json')
    df_embed_abs.to_json('embed_summary_11.json')
    return sub

def prepQuery(data):
    cdata = clean(data, no_urls=True, no_emails=True, no_phone_numbers=True)
    fdata = filter_text(cdata)
    return fdata

def loadModel(index_path, info_path):
    if os.path.exists(index_path):
        index = faiss.read_index(index_path)
    if os.path.exists(info_path):
        info = pd.read_json(info_path, typ='series')
    return (index, info)

def getLocs(D, I, thresh):
    ii = I[np.where(D > thresh)]
    if len(ii) == 0:
        jj = I[np.where(D > thresh - 0.1)]
    elif len(ii) == D.shape[1]:
        jj = I[np.where(D > thresh + 0.05)]
        if len(jj) == 0:
            jj = ii
    else:
        jj = ii
    return jj

def getRanks(index, info, query, top, thresh):
    faiss.normalize_L2(query)
    D, I = index.search(query, top)
    inds = getLocs(D, I, thresh)
    subset = info.loc[inds]
    return subset

def predict(data_in, embedder, thresh, n_closest, index_file_path, info_file_path):
    index, info = loadModel(index_file_path, info_file_path)
    query = prepQuery(data_in)
    query_embedding = np.ascontiguousarray(embedder.encode([query])).astype('float32')
    ranks = getRanks(index, info, query_embedding, n_closest, thresh)
    return ranks

embedder = SentenceTransformer('/kaggle/input/ethics-transformer/bert-base-ethics_training/')
in_file = '/kaggle/input/covid-processed-data/combined_data3.csv'
index_file_prefix = '/kaggle/working/corpus_index_'
info_file_prefix = '/kaggle/working/corpus_info_'
thresh = 0.6
topRank = 20
isTrainAll = False
isTrainTitle = False
isTrainAbstract = False
isTrainBody = True
if isTrainAll:
    isTrainTitle = True
    isTrainAbstract = True
    isTrainBody = True
isInferAll = False
isInferTitle = False
isInferAbstract = False
isInferBody = False
if isInferAll:
    isInferTitle = True
    isInferAbstract = True
    isInferBody = True
data = pd.read_csv(in_file)
data = data.replace('nan', np.nan, regex=False)
if isTrainBody:
    df_train_body = prepBody(data, embedder)
    print('Body Corpus len:', len(df_train_body))
code
32068117/cell_2
[ "text_plain_output_1.png" ]
import os
from sentence_transformers import SentenceTransformer
import faiss
from tqdm import tqdm
from summarizer import Summarizer
import scipy.spatial
import pandas as pd
import numpy as np
import re
import json
import time
import os
from cleantext import clean

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068117/cell_1
[ "text_plain_output_1.png" ]
!pip install torch===1.4.0 torchvision===0.5.0 -f https://download.pytorch.org/whl/torch_stable.html
!pip install clean-text
!pip install -U sentence-transformers
!pip install tqdm
!pip install faiss-cpu --no-cache
!pip install bert-extractive-summarizer
!pip install spacy==2.1.3
code
17118681/cell_20
[ "text_plain_output_1.png" ]
print('Salvando modelo em arquivo \n')
mp = '.\\mnist_model.h5'
model.save(mp)
code
17118681/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
batch_size = 128
max_epochs = 50
print('Iniciando treinamento... ')
code
17118681/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
import seaborn as sns
import os
from matplotlib import pyplot as plt

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
code
17118681/cell_24
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import tensorflow as tf

np.random.seed(4)
tf.set_random_seed(13)
mp = '.\\mnist_model.h5'
model.save(mp)
unknown = np.zeros(shape=(28, 28), dtype=np.float32)
for row in range(5, 23):
    unknown[row][9] = 180
for rc in range(9, 19):
    unknown[rc][rc] = 250
unknown = unknown.reshape(1, 28, 28, 1)
predicted = model.predict(unknown)
str = ['zero', 'um', 'dois', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove']
index = np.argmax(predicted[0])
digit = str[index]
print(digit)
code
17118681/cell_22
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

np.random.seed(4)
tf.set_random_seed(13)
mp = '.\\mnist_model.h5'
model.save(mp)
print('Usando o modelo para previsão de dígitos para a imagem: ')
unknown = np.zeros(shape=(28, 28), dtype=np.float32)
for row in range(5, 23):
    unknown[row][9] = 180
for rc in range(9, 19):
    unknown[rc][rc] = 250
plt.imshow(unknown, cmap=plt.get_cmap('gray_r'))
plt.show()
unknown = unknown.reshape(1, 28, 28, 1)
predicted = model.predict(unknown)
print('\nO valor do dígito previsto é: ')
print(predicted)
code
90133914/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from bq_helper import BigQueryHelper
from datetime import datetime
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper

client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
# print(df.head())
# print(datetime.fromtimestamp(df['timestamp'][10000] / 1000.0))
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
df.diff().plot(kind='line')
maxidx = df.idxmax()
print(maxidx)
print(df['timestamp'][maxidx['timestamp']])
code
90133914/cell_6
[ "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper
from datetime import datetime
from datetime import datetime, timedelta
from google.cloud import bigquery
from scipy.stats import norm
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper

client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
# print(df.head())
# print(datetime.fromtimestamp(df['timestamp'][10000] / 1000.0))
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
maxidx = df.idxmax()
from scipy.stats import norm
from datetime import datetime, timedelta
import numpy as np
df = df.diff()
df = df.dropna()
print(df.head())
print(df.describe())
print(df.dtypes)
# convert timedelta type to a float (seconds)
print(df['timestamp'][2].total_seconds())
df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds())
float_summary = df.describe()
print(float_summary)
time_threshold = timedelta(hours=2).total_seconds()
print(float_summary["timestamp"][1])  # mean
print(float_summary["timestamp"][2])  # std
print(time_threshold)
prob_of_greater_than_two_hours = norm.cdf(time_threshold, float_summary['timestamp'][1], float_summary['timestamp'][2])
print(prob_of_greater_than_two_hours)
print((1 - prob_of_greater_than_two_hours) * df.shape[0])
print(df.shape[0])
print(len(df[df['timestamp'] > time_threshold]))
print(df.timestamp.quantile(0.99))
print(df.timestamp.quantile(0.1))
code
90133914/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper

client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
print('Size of dataframe: {} Bytes'.format(int(df.memory_usage(index=True, deep=True).sum())))
df.head(10)
code
90133914/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper
from datetime import datetime
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper

client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
code
90133914/cell_5
[ "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper
from datetime import datetime
from datetime import datetime, timedelta
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper

client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
# print(df.head())
# print(datetime.fromtimestamp(df['timestamp'][10000] / 1000.0))
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
maxidx = df.idxmax()
from scipy.stats import norm
from datetime import datetime, timedelta
import numpy as np
df = df.diff()
df = df.dropna()
print(df.head())
print(df.describe())
print(df.dtypes)
print(df['timestamp'][2].total_seconds())
df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds())
float_summary = df.describe()
print(float_summary)
time_threshold = timedelta(hours=2).total_seconds()
print(float_summary['timestamp'][1])
print(float_summary['timestamp'][2])
code
34120483/cell_42
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
from sklearn.naive_bayes import MultinomialNB
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_clf.fit(X_train_onehot, y_train)
lr_pred = lr_clf.predict(X_val_onehot)
from sklearn.metrics import log_loss
logloss_lr = log_loss(y_val, lr_clf.predict_proba(X_val_onehot))
from sklearn.naive_bayes import MultinomialNB
mnb_clf = MultinomialNB()
mnb_clf.fit(X_train_onehot, y_train)
mnb_pred = mnb_clf.predict(X_val_onehot)
logloss_mnb = log_loss(y_val, mnb_clf.predict_proba(X_val_onehot))
rf_clf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf_clf.fit(X_train_onehot, y_train)
rf_pred = rf_clf.predict(X_val_onehot)
logloss_rf = log_loss(y_val, rf_clf.predict_proba(X_val_onehot))
lr_predictions_val = lr_clf.predict_proba(X_val_onehot)
mnb_predictions_val = mnb_clf.predict_proba(X_val_onehot)
rf_predictions_val = rf_clf.predict_proba(X_val_onehot)
predictions_val = 1 / 5 * lr_predictions_val[:, 1] + 1 / 5 * rf_predictions_val[:, 1]
predictions_val = 1 / 3 * lr_predictions_val[:, 1] + 1 / 3 * mnb_predictions_val[:, 1]
predictions_val = np.where(predictions_val > 0.5, 1, 0)
df_actual = pd.read_csv('../input/nlp-getting-started/test.csv')
X_test = df_actual['text_processed']
X_test_onehot = vectorizer.transform(X_test).todense()
lr_predictions = lr_clf.predict_proba(X_test_onehot)
mnb_predictions = mnb_clf.predict_proba(X_test_onehot)
rf_predictions = rf_clf.predict_proba(X_test_onehot)
predictions = 1 / 5 * lr_predictions[:, 1] + 1 / 5 * rf_predictions[:, 1]
predictions = 1 / 3 * lr_predictions[:, 1] + 1 / 3 * mnb_predictions[:, 1]
predictions = np.where(predictions > 0.5, 1, 0)
df_submission = pd.read_csv('../input/nlp-getting-started/test.csv')
df_submission['target'] = predictions
df_submission.to_csv('submission.csv', index=False)
df_submission.head()
code
34120483/cell_21
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_clf.fit(X_train_onehot, y_train)
lr_pred = lr_clf.predict(X_val_onehot)
print('accuracy score: ', accuracy_score(lr_pred, y_val))
print(classification_report(y_val, lr_pred))
code
34120483/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns #what is seaborn
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
#creating subplots to see distribution of length of tweet
sns.set_style("darkgrid");
f, (ax1, ax2) = plt.subplots(figsize=(12,6),nrows=1, ncols=2,tight_layout=True);
sns.distplot(df_sample[df_sample['target']==1]["length"],bins=30,ax=ax1);
sns.distplot(df_sample[df_sample['target']==0]["length"],bins=30,ax=ax2);
ax1.set_title('\n Distribution of length of tweet labelled Disaster\n');
ax2.set_title('\nDistribution of length of tweet labelled No Disaster\n ');
ax1.set_ylabel('Frequency');
text = ' '.join((post for post in df_sample[df_sample['target'] == 1].text))
wordcloud = WordCloud(max_font_size=90, max_words=50, background_color='white', colormap='inferno').generate(text)
plt.axis('off')
text = ' '.join((post for post in df_sample[df_sample['target'] == 0].text))
wordcloud = WordCloud(max_font_size=90, max_words=50, background_color='white', colormap='inferno').generate(text)
plt.figure(figsize=(10, 10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.title('\nFrequently occurring words related to Non Disaster \n\n', fontsize=18)
plt.axis('off')
plt.show()
code
34120483/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
from sklearn.naive_bayes import MultinomialNB
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.naive_bayes import MultinomialNB
mnb_clf = MultinomialNB()
mnb_clf.fit(X_train_onehot, y_train)
mnb_pred = mnb_clf.predict(X_val_onehot)
logloss_mnb = log_loss(y_val, mnb_clf.predict_proba(X_val_onehot))
print('logloss_mnb:', logloss_mnb)
code
34120483/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_sample.head()
code
34120483/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_actual = pd.read_csv('../input/nlp-getting-started/test.csv')
code
34120483/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_sample['length'] = np.NaN for i in range(0, len(df_sample['text'])): df_sample['length'][i] = len(df_sample['text'][i]) df_sample.length = df_sample.length.astype(int)
code
34120483/cell_41
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
from sklearn.naive_bayes import MultinomialNB
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_clf.fit(X_train_onehot, y_train)
lr_pred = lr_clf.predict(X_val_onehot)
from sklearn.metrics import log_loss
logloss_lr = log_loss(y_val, lr_clf.predict_proba(X_val_onehot))
from sklearn.naive_bayes import MultinomialNB
mnb_clf = MultinomialNB()
mnb_clf.fit(X_train_onehot, y_train)
mnb_pred = mnb_clf.predict(X_val_onehot)
logloss_mnb = log_loss(y_val, mnb_clf.predict_proba(X_val_onehot))
rf_clf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf_clf.fit(X_train_onehot, y_train)
rf_pred = rf_clf.predict(X_val_onehot)
logloss_rf = log_loss(y_val, rf_clf.predict_proba(X_val_onehot))
lr_predictions_val = lr_clf.predict_proba(X_val_onehot)
mnb_predictions_val = mnb_clf.predict_proba(X_val_onehot)
rf_predictions_val = rf_clf.predict_proba(X_val_onehot)
predictions_val = 1 / 5 * lr_predictions_val[:, 1] + 1 / 5 * rf_predictions_val[:, 1]
predictions_val = 1 / 3 * lr_predictions_val[:, 1] + 1 / 3 * mnb_predictions_val[:, 1]
predictions_val = np.where(predictions_val > 0.5, 1, 0)
df_actual = pd.read_csv('../input/nlp-getting-started/test.csv')
X_test = df_actual['text_processed']
X_test_onehot = vectorizer.transform(X_test).todense()
lr_predictions = lr_clf.predict_proba(X_test_onehot)
mnb_predictions = mnb_clf.predict_proba(X_test_onehot)
rf_predictions = rf_clf.predict_proba(X_test_onehot)
predictions = 1 / 5 * lr_predictions[:, 1] + 1 / 5 * rf_predictions[:, 1]
predictions = 1 / 3 * lr_predictions[:, 1] + 1 / 3 * mnb_predictions[:, 1]
predictions = np.where(predictions > 0.5, 1, 0)
df_submission = pd.read_csv('../input/nlp-getting-started/test.csv')
df_submission['target'] = predictions
df_submission.to_csv('submission.csv', index=False)
code
34120483/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns #what is seaborn df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_sample['length'] = np.NaN for i in range(0, len(df_sample['text'])): df_sample['length'][i] = len(df_sample['text'][i]) df_sample.length = df_sample.length.astype(int) sns.set_style('darkgrid') f, (ax1, ax2) = plt.subplots(figsize=(12, 6), nrows=1, ncols=2, tight_layout=True) sns.distplot(df_sample[df_sample['target'] == 1]['length'], bins=30, ax=ax1) sns.distplot(df_sample[df_sample['target'] == 0]['length'], bins=30, ax=ax2) ax1.set_title('\n Distribution of length of tweet labelled Disaster\n') ax2.set_title('\nDistribution of length of tweet labelled No Disaster\n ') ax1.set_ylabel('Frequency')
code
34120483/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' is created by an earlier notebook cell not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
print('vocab length', len(vectorizer.vocabulary_))
code
34120483/cell_32
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
from sklearn.naive_bayes import MultinomialNB
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_clf.fit(X_train_onehot, y_train)
lr_pred = lr_clf.predict(X_val_onehot)
from sklearn.metrics import log_loss
logloss_lr = log_loss(y_val, lr_clf.predict_proba(X_val_onehot))
from sklearn.naive_bayes import MultinomialNB
mnb_clf = MultinomialNB()
mnb_clf.fit(X_train_onehot, y_train)
mnb_pred = mnb_clf.predict(X_val_onehot)
logloss_mnb = log_loss(y_val, mnb_clf.predict_proba(X_val_onehot))
rf_clf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf_clf.fit(X_train_onehot, y_train)
rf_pred = rf_clf.predict(X_val_onehot)
logloss_rf = log_loss(y_val, rf_clf.predict_proba(X_val_onehot))
lr_predictions_val = lr_clf.predict_proba(X_val_onehot)
mnb_predictions_val = mnb_clf.predict_proba(X_val_onehot)
rf_predictions_val = rf_clf.predict_proba(X_val_onehot)
predictions_val = 1 / 5 * lr_predictions_val[:, 1] + 1 / 5 * rf_predictions_val[:, 1]
predictions_val = 1 / 3 * lr_predictions_val[:, 1] + 1 / 3 * mnb_predictions_val[:, 1]
predictions_val = np.where(predictions_val > 0.5, 1, 0)
print('accuracy score: ', accuracy_score(predictions_val, y_val))
print(classification_report(y_val, predictions_val))
code
34120483/cell_28
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
rf_clf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf_clf.fit(X_train_onehot, y_train)
rf_pred = rf_clf.predict(X_val_onehot)
logloss_rf = log_loss(y_val, rf_clf.predict_proba(X_val_onehot))
print('logloss_rf:', logloss_rf)
code
34120483/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns #what is seaborn
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
#creating subplots to see distribution of length of tweet
sns.set_style("darkgrid");
f, (ax1, ax2) = plt.subplots(figsize=(12,6),nrows=1, ncols=2,tight_layout=True);
sns.distplot(df_sample[df_sample['target']==1]["length"],bins=30,ax=ax1);
sns.distplot(df_sample[df_sample['target']==0]["length"],bins=30,ax=ax2);
ax1.set_title('\n Distribution of length of tweet labelled Disaster\n');
ax2.set_title('\nDistribution of length of tweet labelled No Disaster\n ');
ax1.set_ylabel('Frequency');
text = ' '.join((post for post in df_sample[df_sample['target'] == 1].text))
wordcloud = WordCloud(max_font_size=90, max_words=50, background_color='white', colormap='inferno').generate(text)
plt.figure(figsize=(10, 10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.title('\nFrequently occurring words related to Disaster \n\n', fontsize=18)
plt.axis('off')
plt.show()
code
34120483/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_actual = pd.read_csv('../input/nlp-getting-started/test.csv') df_actual.head()
code
34120483/cell_24
[ "image_output_1.png" ]
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
from sklearn.naive_bayes import MultinomialNB
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.naive_bayes import MultinomialNB
mnb_clf = MultinomialNB()
mnb_clf.fit(X_train_onehot, y_train)
mnb_pred = mnb_clf.predict(X_val_onehot)
print('accuracy score: ', accuracy_score(mnb_pred, y_val))
print(classification_report(y_val, mnb_pred))
code
34120483/cell_14
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_sample['length'] = np.NaN for i in range(0, len(df_sample['text'])): df_sample['length'][i] = len(df_sample['text'][i]) df_sample.length = df_sample.length.astype(int) df_sample.head()
code
34120483/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_clf.fit(X_train_onehot, y_train)
lr_pred = lr_clf.predict(X_val_onehot)
from sklearn.metrics import log_loss
logloss_lr = log_loss(y_val, lr_clf.predict_proba(X_val_onehot))
print('logloss_lr:', logloss_lr)
code
34120483/cell_27
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,make_scorer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
# 'text_processed' and the X_train/X_val/y_train/y_val split are produced by earlier notebook cells not captured in this extract.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=3, strip_accents='unicode', use_idf=1, smooth_idf=1, sublinear_tf=1, max_features=None)
vectorizer.fit(list(df_sample['text_processed']) + list(df_actual['text_processed']))
X_train_onehot = vectorizer.transform(X_train).todense()
X_val_onehot = vectorizer.transform(X_val).todense()
rf_clf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf_clf.fit(X_train_onehot, y_train)
rf_pred = rf_clf.predict(X_val_onehot)
print('accuracy score: ', accuracy_score(rf_pred, y_val))
print(classification_report(y_val, rf_pred))
code
34120483/cell_12
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import seaborn as sns #what is seaborn
df_sample = pd.read_csv('/kaggle/input/train.csv')
df_actual = pd.read_csv('/kaggle/input/test.csv')
df_sample['length'] = np.NaN
for i in range(0, len(df_sample['text'])):
    df_sample['length'][i] = len(df_sample['text'][i])
df_sample.length = df_sample.length.astype(int)
#creating subplots to see distribution of length of tweet
sns.set_style("darkgrid");
f, (ax1, ax2) = plt.subplots(figsize=(12,6),nrows=1, ncols=2,tight_layout=True);
sns.distplot(df_sample[df_sample['target']==1]["length"],bins=30,ax=ax1);
sns.distplot(df_sample[df_sample['target']==0]["length"],bins=30,ax=ax2);
ax1.set_title('\n Distribution of length of tweet labelled Disaster\n');
ax2.set_title('\nDistribution of length of tweet labelled No Disaster\n ');
ax1.set_ylabel('Frequency');
text = ' '.join((post for post in df_sample[df_sample['target'] == 1].text))
wordcloud = WordCloud(max_font_size=90, max_words=50, background_color='white', colormap='inferno').generate(text)
plt.axis('off')
text = ' '.join((post for post in df_sample[df_sample['target'] == 0].text))
wordcloud = WordCloud(max_font_size=90, max_words=50, background_color='white', colormap='inferno').generate(text)
plt.axis('off')
import string
from nltk.stem.porter import PorterStemmer
from wordcloud import STOPWORDS
# PUNCT_TO_REMOVE, STOPWORDS and stemmer come from earlier cells of the source notebook
# that are not part of this extract; the definitions used here are only assumptions.
PUNCT_TO_REMOVE = string.punctuation
stemmer = PorterStemmer()
def text_preprocessing(text):
    """
    input: string to be processed
    output: preprocessed string
    """
    text = text.lower()
    text = re.compile('https?://\\S+|www\\.\\S+').sub('', text)
    text = text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
    text = ' '.join([word for word in str(text).split() if word not in STOPWORDS])
    text = ' '.join([stemmer.stem(word) for word in text.split()])
    return text
text_preprocessing('#Flashflood causes #landslide in Gilgit #Pakistan Damage to 20 homes\n farmland roads and bridges #365disasters http://t.co/911F3IXRH0')
code
34120483/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_sample = pd.read_csv('/kaggle/input/train.csv') df_actual = pd.read_csv('/kaggle/input/test.csv') df_sample['target'].value_counts()
code
33097052/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pybtex.database.input import bibtex import pandas as pd
code
1003788/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() plt.figure(figsize=(12, 12)) sns.heatmap(corr, vmax=1, square=True)
code
1003788/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) print(train.select_dtypes(include=['object']).columns.values)
code
1003788/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}}) all_data = all_data.fillna(all_data.mean()) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) all_data = pd.get_dummies(all_data) matplotlib.rcParams['figure.figsize'] = (12.0, 6.0) prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])}) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() xt = plt.xticks(rotation=45) g = sns.FacetGrid(train, col='YrSold', col_wrap=3) g.map(sns.boxplot, 'MoSold', 'SalePrice', palette='Set2', order=range(1, 13)).set(ylim=(0, 500000)) plt.tight_layout() fig, ax = plt.subplots(2, 1, figsize = (10, 6)) sns.boxplot(x = 'SaleType', y = 'SalePrice', data = train, ax = ax[0]) sns.boxplot(x = 'SaleCondition', y = 'SalePrice', data = train, ax = ax[1]) plt.tight_layout() var = 'YearBuilt' data = pd.concat([train['SalePrice'], train[var]], axis=1) f, ax = plt.subplots(figsize=(16, 8)) fig = sns.boxplot(x=var, y="SalePrice", data=data) fig.axis(ymin=0, ymax=800000); plt.xticks(rotation=90); sns.violinplot('Functional', 'SalePrice', data=train)
code
1003788/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) sns.distplot(train['SalePrice'], kde=False, color='b', hist_kws={'alpha': 0.9})
code
1003788/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() xt = plt.xticks(rotation=45) g = sns.FacetGrid(train, col='YrSold', col_wrap=3) g.map(sns.boxplot, 'MoSold', 'SalePrice', palette='Set2', order=range(1, 13)).set(ylim=(0, 500000)) plt.tight_layout()
code
1003788/cell_11
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20)
code
1003788/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) print('Skewness: %f' % train['SalePrice'].skew()) print('Kurtosis: %f' % train['SalePrice'].kurt())
code
1003788/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) train['SalePrice'].describe()
code
1003788/cell_32
[ "image_output_1.png" ]
import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}}) all_data = all_data.fillna(all_data.mean()) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) all_data = pd.get_dummies(all_data) matplotlib.rcParams['figure.figsize'] = (12.0, 6.0) prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])}) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() xt = plt.xticks(rotation=45) g = sns.FacetGrid(train, col='YrSold', col_wrap=3) g.map(sns.boxplot, 'MoSold', 'SalePrice', palette='Set2', order=range(1, 13)).set(ylim=(0, 500000)) plt.tight_layout() fig, ax = plt.subplots(2, 1, figsize = (10, 6)) sns.boxplot(x = 'SaleType', y = 'SalePrice', data = train, ax = ax[0]) sns.boxplot(x = 'SaleCondition', y = 'SalePrice', data = train, ax = ax[1]) plt.tight_layout() var = 'YearBuilt' data = pd.concat([train['SalePrice'], train[var]], axis=1) f, ax = plt.subplots(figsize=(16, 8)) fig = sns.boxplot(x=var, y='SalePrice', data=data) fig.axis(ymin=0, ymax=800000) plt.xticks(rotation=90)
code
1003788/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() plt.figure(figsize=(12, 6)) sns.countplot(x='Neighborhood', data=train) xt = plt.xticks(rotation=45)
code
1003788/cell_15
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) train.head()
code
1003788/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}}) all_data = all_data.fillna(all_data.mean()) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) all_data = pd.get_dummies(all_data) matplotlib.rcParams['figure.figsize'] = (12.0, 6.0) prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])}) prices.hist()
code
1003788/cell_17
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) sns.distplot(train['SalePrice'])
code
1003788/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr() xt = plt.xticks(rotation=45) g = sns.FacetGrid(train, col='YrSold', col_wrap=3) g.map(sns.boxplot, 'MoSold', 'SalePrice', palette='Set2', order=range(1, 13)).set(ylim=(0, 500000)) plt.tight_layout() fig, ax = plt.subplots(2, 1, figsize=(10, 6)) sns.boxplot(x='SaleType', y='SalePrice', data=train, ax=ax[0]) sns.boxplot(x='SaleCondition', y='SalePrice', data=train, ax=ax[1]) plt.tight_layout()
code
1003788/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) sns.regplot(x='OverallQual', y='SalePrice', data=train, color='Orange')
code
1003788/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
cor_dict = corr['SalePrice'].to_dict()
del cor_dict['SalePrice']
print('List the numerical features in descending order of their absolute correlation with Sale Price:\n')
for ele in sorted(cor_dict.items(), key=lambda x: -abs(x[1])):
    print('{0}: \t{1}'.format(*ele))
code
331419/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) def cleanResults(raceColumns,dfResultsTemp,appendScore): for raceCol in raceColumns: dfResultsTemp.index = dfResultsTemp.index.str.replace(r"(\w)([A-Z])", r"\1 \2") dfResultsTemp.index = dfResultsTemp.index.str.title() dfResultsTemp.index = dfResultsTemp.index.str.replace('\([A-Z\ 0-9]*\)','') dfResultsTemp.index = dfResultsTemp.index.str.strip() dfResultsTemp.index = dfResultsTemp.index.str.replace('Riccardo Andrea Leccese','Rikki Leccese') dfResultsTemp.index = dfResultsTemp.index.str.replace('Nicolas Parlier','Nico Parlier') dfResultsTemp.index = dfResultsTemp.index.str.replace('Alejandro Climent Hernã¥_ Ndez', 'Alejandro Climent Hernandez') dfResultsTemp.index = dfResultsTemp.index.str.replace('Alexandre Caizergues','Alex Caizergues') dfResultsTemp.index = dfResultsTemp.index.str.replace('Florian Trittel Paul','Florian Trittel') dfResultsTemp.index = dfResultsTemp.index.str.replace('Jean Guillaume Rivaud','Jean-Guillaume Rivaud') dfResultsTemp.index = dfResultsTemp.index.str.replace('^Kieran Le$','Kieran Le Borgne') dfResultsTemp.index = dfResultsTemp.index.str.replace('Marvin Baumeisterschoenian','Marvin Baumeister Schoenian') dfResultsTemp.index = dfResultsTemp.index.str.replace('Theo De Ramecourt','Theo De-Ramecourt') dfResultsTemp.index = dfResultsTemp.index.str.replace('James Johnson','James Johnsen') dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str) #This code needs cleaning up. Has the important assumptions about what counts as last place and what doesn't count dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('D\+D|DSQ|D\+0|^-[A-Z0-9]*$|\([A-Z0-9\.-]*\)|UFD|SCP|RDG|RCT|DCT|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*|[0-9\.]*DNC|\/','') dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('DNS','') dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('RET[0-9]*|DNF-[0-9]*|^DNF$|[0-9\.]*DNF',str(len(dfResultsTemp)+1)) dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol]) dfResultsTemp[raceCol] = dfResultsTemp[raceCol] + appendScore return dfResultsTemp def mergeResults(raceColumns, raceName, dfResultsTemp, dfResults): for raceCol in raceColumns: raceIndex = raceName + '-' + raceCol dfResultsTemp[raceIndex] = dfResultsTemp[raceCol] del dfResultsTemp[raceCol] dfResults = pd.merge(dfResults, dfResultsTemp[[raceIndex]], left_index=True, right_index=True, how='outer') return dfResults dfResults = pd.DataFrame()
code
73074336/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns df.isnull().sum() df.drop('company', inplace=True, axis=1) df
code
73074336/cell_4
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape
code
73074336/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns df.head()
code