path (stringlengths: 13 to 17)
screenshot_names (sequencelengths: 1 to 873)
code (stringlengths: 0 to 40.4k)
cell_type (stringclasses: 1 value)
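The four fields above describe one record per notebook cell: a notebook/cell path, the list of rendered output screenshots, the cell source, and the cell type. As a minimal sketch of how such records could be iterated (the JSON-lines file name is an assumption; only the four field names come from the schema above):

import json

# Hypothetical loading sketch: assumes the records are stored as JSON lines in 'cells.jsonl'.
with open('cells.jsonl', 'r', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        notebook_id, cell_name = record['path'].split('/')  # e.g. '130007697/cell_24'
        print(notebook_id, cell_name, record['cell_type'], len(record['screenshot_names']))
        # record['code'] holds the raw source of the notebook cell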
130007697/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import easyocr

reader = easyocr.Reader(['en'])
frame = cv.imread(img)
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
result_img = read_plate_number(cordinates, frame, reader)
plt.imshow(result_img)
plt.show()
code
130007697/cell_14
[ "text_plain_output_1.png" ]
!python train.py --img 640 --batch 16 --epochs 15 --data /kaggle/working/plate_datasets/dataset.yaml --weights yolov5s.pt --cache ram
code
130007697/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
!git clone https://github.com/ultralytics/yolov5  # clone
!pip install -r requirements.txt
code
130007697/cell_5
[ "image_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt

img = cv.imread('/kaggle/input/car-plate-detection/images/Cars0.png')
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
rec = cv.rectangle(img, (226, 125), (419, 173), (0, 250, 0), 2)
rec = cv.circle(rec, ((226 + 419) // 2, (125 + 173) // 2), 2, (255, 0, 0), 2)
plt.imshow(rec)
code
320908/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
teams = pd.read_sql_query('select * from Teams', conn)
users = pd.read_sql_query('select * from Users', conn)
teammembers = pd.read_sql_query('select * from TeamMemberships', conn)
teams_q = teammembers.groupby('TeamId').UserId.count()
teams_q = teams_q[teams_q > 1].reset_index()
teammembers_cut = teammembers.merge(teams_q, on='TeamId')
users_q = teammembers_cut.groupby('UserId_x').TeamId.count().reset_index()
teammembers_cut = teammembers_cut.merge(users_q, left_on='UserId_x', right_on='UserId_x')
teammembers_cut = teammembers_cut.merge(teams, left_on='TeamId_x', right_on='Id')
teammembers_cut = teammembers_cut.merge(users, left_on='UserId_x', right_on='Id')
tm4graph = teammembers_cut[['TeamId_x', 'UserId_x']]
tm4graph['TeamId_x'] = 'Team_' + tm4graph['TeamId_x'].astype('str')
tm4graph['UserId_x'] = 'User_' + tm4graph['UserId_x'].astype('str')
code
320908/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import spdiags, coo_matrix
import networkx as nx
import numpy as np
import numpy as np
import pandas as pd
import plotly
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
teams = pd.read_sql_query('select * from Teams', conn)
users = pd.read_sql_query('select * from Users', conn)
teammembers = pd.read_sql_query('select * from TeamMemberships', conn)
teams_q = teammembers.groupby('TeamId').UserId.count()
teams_q = teams_q[teams_q > 1].reset_index()
teammembers_cut = teammembers.merge(teams_q, on='TeamId')
users_q = teammembers_cut.groupby('UserId_x').TeamId.count().reset_index()
teammembers_cut = teammembers_cut.merge(users_q, left_on='UserId_x', right_on='UserId_x')
teammembers_cut = teammembers_cut.merge(teams, left_on='TeamId_x', right_on='Id')
teammembers_cut = teammembers_cut.merge(users, left_on='UserId_x', right_on='Id')
tm4graph = teammembers_cut[['TeamId_x', 'UserId_x']]
tm4graph['TeamId_x'] = 'Team_' + tm4graph['TeamId_x'].astype('str')
tm4graph['UserId_x'] = 'User_' + tm4graph['UserId_x'].astype('str')

from scipy.sparse import spdiags, coo_matrix
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt

def forceatlas2_layout(G, iterations=10, linlog=False, pos=None, nohubs=False, kr=0.001, k=None, dim=2):
    """
    Options values are
    g               The graph to layout
    iterations      Number of iterations to do
    linlog          Whether to use linear or log repulsion
    random_init     Start with a random position
                    If false, start with FR
    avoidoverlap    Whether to avoid overlap of points
    degreebased     Degree based repulsion
    """
    for n in G:
        G.node[n]['prevcs'] = 0
        G.node[n]['currcs'] = 0
    A = nx.to_scipy_sparse_matrix(G, dtype='f')
    nnodes, _ = A.shape
    try:
        A = A.tolil()
    except Exception as e:
        A = coo_matrix(A).tolil()
    if pos is None:
        pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype)
    else:
        pos = pos.astype(A.dtype)
    if k is None:
        k = np.sqrt(1.0 / nnodes)
    t = 0.1
    dt = t / float(iterations + 1)
    displacement = np.zeros((dim, nnodes))
    for iteration in range(iterations):
        displacement *= 0
        for i in range(A.shape[0]):
            delta = (pos[i] - pos).T
            distance = np.sqrt((delta ** 2).sum(axis=0))
            distance = np.where(distance < 0.01, 0.01, distance)
            Ai = np.asarray(A.getrowview(i).toarray())
            Dist = k * k / distance ** 2
            if nohubs:
                Dist = Dist / float(Ai.sum(axis=1) + 1)
            if linlog:
                Dist = np.log(Dist + 1)
            displacement[:, i] += (delta * (Dist - Ai * distance / k)).sum(axis=1)
        length = np.sqrt((displacement ** 2).sum(axis=0))
        length = np.where(length < 0.01, 0.01, length)
        pos += (displacement * t / length).T
        t -= dt
    return dict(zip(G, pos))

axis = dict(showline=False, zeroline=False, showgrid=False, showticklabels=False, title='')
layout = Layout(title='Kaggle teams/users universe', font=Font(size=12), showlegend=True, autosize=False, width=800, height=800, xaxis=XAxis(axis), yaxis=YAxis(axis), margin=Margin(l=40, r=40, b=85, t=100), hovermode='closest', annotations=Annotations([Annotation(showarrow=False, text='', xref='paper', yref='paper', x=0, y=-0.1, xanchor='left', yanchor='bottom', font=Font(size=14))]))
edges_to_use = 22000
G = nx.Graph()
G.add_edges_from(tm4graph.values[0:edges_to_use])
pos = forceatlas2_layout(G, iterations=300, nohubs=True)
N = G.number_of_nodes()
E = G.edges()
labels = G.nodes()
Xv_teams = [pos[k][0] for k in labels if 'Team' in k]
Yv_teams = [pos[k][1] for k in labels if 'Team' in k]
Xv_users = [pos[k][0] for k in labels if 'User' in k]
Yv_users = [pos[k][1] for k in labels if 'User' in k]
labels_team = [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'TeamName'].values[0] for k in labels if 'Team' in k]
labels_users = [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'DisplayName'].values[0] for k in labels if 'User' in k]
Xed = []
Yed = []
for edge in E:
    Xed += [pos[edge[0]][0], pos[edge[1]][0], None]
    Yed += [pos[edge[0]][1], pos[edge[1]][1], None]
trace3 = Scatter(x=Xed, y=Yed, mode='lines', line=Line(color='rgb(200,200,200)', width=2), name='Links', hoverinfo='none')
trace4 = Scatter(x=Xv_teams, y=Yv_teams, mode='markers', name='Teams', marker=Marker(symbol='dot', size=[teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'UserId_y'].values[0] for k in labels if 'Team' in k], color='rgb(146,209,81)', line=Line(color='rgb(50,50,50)', width=0.5)), text=map(lambda x: ['Team: ' + u''.join(x[0]).encode('utf8').strip() + '<br>Users: ' + str(','.join(x[1]).encode('utf8')) + '<br>'], zip(labels_team, [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'DisplayName'].values.tolist() for k in labels if 'Team' in k])), hoverinfo='text')
trace5 = Scatter(x=Xv_users, y=Yv_users, mode='markers', name='Users', marker=Marker(symbol='dot', size=[teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'TeamId_y'].values[0] * 0.5 for k in labels if 'User' in k], color='#000000', line=Line(color='rgb(50,50,50)', width=0.5)), text=map(lambda x: ['User: ' + u''.join(x[0]).encode('utf8').strip() + '<br>Teams: ' + str(','.join(x[1]).encode('utf8')) + '<br>'], zip(labels_users, [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'TeamName'].values.tolist() for k in labels if 'User' in k])), hoverinfo='text')
data1 = Data([trace3, trace4, trace5])
fig1 = Figure(data=data1, layout=layout)
plotly.offline.iplot(fig1)
code
16157889/cell_23
[ "text_plain_output_1.png" ]
from copy import deepcopy
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np
import pandas as pd
import warnings
import pandas as pd
import warnings
import os
import numpy as np
import folium
from folium import plugins
from sklearn.preprocessing import MinMaxScaler
from copy import deepcopy
import math
import time
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import mixture

warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
all_data = pd.read_csv('../input/globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')
all_data = all_data[['eventid', 'iyear', 'imonth', 'iday', 'extended', 'country', 'region', 'vicinity', 'latitude', 'longitude', 'specificity', 'crit1', 'crit2', 'crit3', 'doubtterr', 'multiple', 'attacktype1', 'success', 'suicide', 'weaptype1', 'targtype1', 'nkill', 'nkillter', 'nwound', 'nwoundte', 'property', 'propextent', 'ishostkid', 'ransom', 'INT_LOG', 'INT_IDEO', 'INT_MISC', 'INT_ANY']]
all_data = all_data.dropna(subset=['latitude'])
all_data = all_data.dropna(subset=['longitude'])
all_data.shape
all_data['specificity'] = pd.to_numeric(all_data['specificity'], downcast='integer')
all_data['doubtterr'] = pd.to_numeric(all_data['doubtterr'], downcast='integer')
all_data['multiple'] = pd.to_numeric(all_data['multiple'], downcast='integer')
all_data['nkill'] = pd.to_numeric(all_data['nkill'], downcast='integer')
all_data['nkillter'] = pd.to_numeric(all_data['nkillter'], downcast='integer')
all_data['nwound'] = pd.to_numeric(all_data['nwound'], downcast='integer')
all_data['nwoundte'] = pd.to_numeric(all_data['nwoundte'], downcast='integer')
all_data['ishostkid'] = pd.to_numeric(all_data['ishostkid'], downcast='integer')
all_data['ransom'] = pd.to_numeric(all_data['ransom'], downcast='integer')
id_ = list(all_data['eventid'])
all_data.drop('eventid', axis=1, inplace=True)
all_data = all_data[['crit1', 'crit2', 'crit3', 'doubtterr', 'multiple', 'attacktype1', 'success', 'suicide', 'weaptype1', 'targtype1', 'nkill', 'nkillter', 'nwound', 'nwoundte', 'property', 'propextent', 'ishostkid', 'ransom']]

"""
author: zhenyu wu
time: 2019/04/07 21:48
function: Normalize the data samples, scaling each feature to a given range.
    MinMaxScaler uses an explicit minimum and maximum; its formula is:
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (MAX - MIN) + MIN
    The minimum of each feature becomes MIN and the maximum becomes MAX, which avoids
    underflow (log of zero) when taking logarithms in the entropy weight method.
params:
    data: dataset
    MIN: lower bound
    MAX: upper bound
return:
    data_norm: normalized dataset
"""
def normalization(data, MIN=0.002, MAX=0.998):
    min_max_scaler = MinMaxScaler(feature_range=(MIN, MAX))
    data_norm = min_max_scaler.fit_transform(data)
    data_norm = pd.DataFrame(data_norm)
    data_norm.columns = data.columns
    return data_norm

all_data_norm = normalization(all_data, MIN=0.002, MAX=0.998)

"""
author: zhenyu wu
time: 2019/04/07 21:47
function: Determine feature weights with the entropy weight method.
params:
    data_norm: dataset
    threshold: threshold
return:
    Entropy: entropy value of each feature
    difference_coefficient: difference coefficient of each feature
    important_features: names of the important features
    entropy_weight: entropy weight of each feature
    overall: importance score of each sample
"""
def entropy(data_norm, data_id, threshold=0.8):
    feature_weight = pd.DataFrame({'temp': list(np.zeros(len(data_norm)))})
    for i in data_norm.columns:
        Sum = data_norm[i].sum()
        temp = data_norm[i] / Sum
        feature_weight[i] = temp
    feature_weight.drop('temp', axis=1, inplace=True)
    Entropy = {}
    for i in feature_weight.columns:
        Sum = 0
        column = list(deepcopy(feature_weight[i]))
        for j in range(len(feature_weight)):
            Sum += column[j] * math.log(column[j])
        Entropy[i] = -1 / math.log(len(feature_weight)) * Sum
    important_features = []
    for key, value in Entropy.items():
        important_features.append(key)
    difference_coefficient = {}
    for i in important_features:
        difference_coefficient[i] = 1 - Entropy[i]
    Diff_sum = sum(list(difference_coefficient.values()))
    entropy_weight = {}
    for i in important_features:
        entropy_weight[i] = difference_coefficient[i] / Diff_sum
    feature_weight = feature_weight[important_features]
    feature_weight = np.mat(feature_weight)
    weight = np.array(list(entropy_weight.values()))
    overall_merit = weight * feature_weight.T
    overall_merit = overall_merit.T
    overall_merit = np.array(overall_merit)
    overall_list = []
    for i in range(len(feature_weight)):
        overall_list.append(overall_merit[i][0])
    overall = pd.DataFrame({'eventid': data_id, 'overall': overall_list})
    overall = overall.sort_values(by=['overall'], ascending=False)
    overall.index = list(np.arange(len(data_norm)))
    data_norm = data_norm[important_features]
    overall = overall.sort_values(by=['eventid'], ascending=True)
    overall.index = list(np.arange(len(data_norm)))
    feature_names = list(entropy_weight.keys())
    entropy_weight = list(entropy_weight.values())
    norm_entropy_weight = []
    for sub_weight in entropy_weight:
        norm_entropy_weight.append(sub_weight / sum(entropy_weight))
    entropy_weight = dict(zip(feature_names, norm_entropy_weight))
    return (entropy_weight, overall)

"""
author: zhenyu wu
time: 2019/04/09 16:51
function: Determine indicator weights with the fuzzy analytic hierarchy process (FAHP).
params:
    r_1_n: fuzzy relations r11~r1n
return:
    B: FAHP weight vector
"""
def FAHP(r_1_n):
    R = np.zeros((len(r_1_n), len(r_1_n)))
    E = np.zeros((len(r_1_n), len(r_1_n)))
    B = np.zeros(len(r_1_n))
    R[0] = r_1_n
    col_1 = 1 - R[0]
    for i in range(len(r_1_n)):
        R[i] = R[0] - (R[0][0] - col_1[i])
    e = R.sum(axis=1)
    for i in range(len(e)):
        for j in range(len(e)):
            E[i][j] = (e[i] - e[j]) / (2 * len(e)) + 0.5
    e = E.sum(axis=0)
    for i in range(len(e)):
        B[i] = (2 * e[i] - 1) / (len(e) * (len(e) - 1))
    return B

"""
author: zhenyu wu
time: 2019/04/09 17:39
function: Determine the weights of all indicators with FAHP.
params:
    feature_names: feature names
return:
    all_fAHP_weight: fAHP weights of all indicators
"""
def fAHP_weight(feature_names):
    r_1_n = [0.5, 0.8, 0.8, 0.8]
    level_1_fAHP_weight = FAHP(r_1_n)
    r_1_n = [0.5, 0.6, 0.7, 0.8, 0.8]
    level_2_std_fAHP_weight = FAHP(r_1_n)
    level_2_std_fAHP_weight = list(level_2_std_fAHP_weight / sum(level_2_std_fAHP_weight) * level_1_fAHP_weight[0])
    r_1_n = [0.5, 0.7, 0.8, 0.8]
    level_2_att_fAHP_weight = FAHP(r_1_n)
    level_2_att_fAHP_weight = list(level_2_att_fAHP_weight / sum(level_2_att_fAHP_weight) * level_1_fAHP_weight[1])
    level_2_targ_fAHP_weight = [level_1_fAHP_weight[2]]
    r_1_n = [0.5, 0.5, 0.4, 0.4, 0.3, 0.4, 0.6, 0.6]
    level_2_kill_fAHP_weight = FAHP(r_1_n)
    level_2_kill_fAHP_weight = list(level_2_kill_fAHP_weight / sum(level_2_kill_fAHP_weight) * level_1_fAHP_weight[3])
    all_fAHP_weight = level_2_std_fAHP_weight + level_2_att_fAHP_weight + level_2_targ_fAHP_weight + level_2_kill_fAHP_weight
    all_fAHP_weight = all_fAHP_weight / sum(all_fAHP_weight)
    AHP_weight = dict(zip(feature_names, all_fAHP_weight))
    return AHP_weight

fAHP_weight = fAHP_weight(all_data_norm.columns)
print(fAHP_weight)
code
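For reference, the entropy-weight computation in the cell above reduces to a few array operations. The following toy snippet uses made-up numbers (not part of any notebook in this dump) to illustrate the formula the translated docstring describes:

import numpy as np

# Toy illustration of the entropy weight method (4 samples, 2 features, all values positive).
X = np.array([[0.2, 0.90],
              [0.4, 0.80],
              [0.6, 0.85],
              [0.8, 0.95]])
P = X / X.sum(axis=0)                              # proportion of each sample within a feature
H = -(P * np.log(P)).sum(axis=0) / np.log(len(X))  # entropy per feature, normalized to [0, 1]
d = 1 - H                                          # difference coefficient
w = d / d.sum()                                    # entropy weights (sum to 1)
print(H, w)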
16157889/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import warnings
import os
import numpy as np
import folium
from folium import plugins
from sklearn.preprocessing import MinMaxScaler
from copy import deepcopy
import math
import time
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import mixture

warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
all_data = pd.read_csv('../input/globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')
all_data.head()
code
16157889/cell_19
[ "text_plain_output_1.png" ]
from copy import deepcopy
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np
import pandas as pd
import warnings
import pandas as pd
import warnings
import os
import numpy as np
import folium
from folium import plugins
from sklearn.preprocessing import MinMaxScaler
from copy import deepcopy
import math
import time
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import mixture

warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
all_data = pd.read_csv('../input/globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')
all_data = all_data[['eventid', 'iyear', 'imonth', 'iday', 'extended', 'country', 'region', 'vicinity', 'latitude', 'longitude', 'specificity', 'crit1', 'crit2', 'crit3', 'doubtterr', 'multiple', 'attacktype1', 'success', 'suicide', 'weaptype1', 'targtype1', 'nkill', 'nkillter', 'nwound', 'nwoundte', 'property', 'propextent', 'ishostkid', 'ransom', 'INT_LOG', 'INT_IDEO', 'INT_MISC', 'INT_ANY']]
all_data = all_data.dropna(subset=['latitude'])
all_data = all_data.dropna(subset=['longitude'])
all_data.shape
all_data['specificity'] = pd.to_numeric(all_data['specificity'], downcast='integer')
all_data['doubtterr'] = pd.to_numeric(all_data['doubtterr'], downcast='integer')
all_data['multiple'] = pd.to_numeric(all_data['multiple'], downcast='integer')
all_data['nkill'] = pd.to_numeric(all_data['nkill'], downcast='integer')
all_data['nkillter'] = pd.to_numeric(all_data['nkillter'], downcast='integer')
all_data['nwound'] = pd.to_numeric(all_data['nwound'], downcast='integer')
all_data['nwoundte'] = pd.to_numeric(all_data['nwoundte'], downcast='integer')
all_data['ishostkid'] = pd.to_numeric(all_data['ishostkid'], downcast='integer')
all_data['ransom'] = pd.to_numeric(all_data['ransom'], downcast='integer')

"""
author: zhenyu wu
time: 2019/04/07 21:48
function: Normalize the data samples, scaling each feature to a given range.
    MinMaxScaler uses an explicit minimum and maximum; its formula is:
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (MAX - MIN) + MIN
    The minimum of each feature becomes MIN and the maximum becomes MAX, which avoids
    underflow (log of zero) when taking logarithms in the entropy weight method.
params:
    data: dataset
    MIN: lower bound
    MAX: upper bound
return:
    data_norm: normalized dataset
"""
def normalization(data, MIN=0.002, MAX=0.998):
    min_max_scaler = MinMaxScaler(feature_range=(MIN, MAX))
    data_norm = min_max_scaler.fit_transform(data)
    data_norm = pd.DataFrame(data_norm)
    data_norm.columns = data.columns
    return data_norm

"""
author: zhenyu wu
time: 2019/04/07 21:47
function: Determine feature weights with the entropy weight method.
params:
    data_norm: dataset
    threshold: threshold
return:
    Entropy: entropy value of each feature
    difference_coefficient: difference coefficient of each feature
    important_features: names of the important features
    entropy_weight: entropy weight of each feature
    overall: importance score of each sample
"""
def entropy(data_norm, data_id, threshold=0.8):
    feature_weight = pd.DataFrame({'temp': list(np.zeros(len(data_norm)))})
    for i in data_norm.columns:
        Sum = data_norm[i].sum()
        temp = data_norm[i] / Sum
        feature_weight[i] = temp
    feature_weight.drop('temp', axis=1, inplace=True)
    Entropy = {}
    for i in feature_weight.columns:
        Sum = 0
        column = list(deepcopy(feature_weight[i]))
        for j in range(len(feature_weight)):
            Sum += column[j] * math.log(column[j])
        Entropy[i] = -1 / math.log(len(feature_weight)) * Sum
    important_features = []
    for key, value in Entropy.items():
        important_features.append(key)
    difference_coefficient = {}
    for i in important_features:
        difference_coefficient[i] = 1 - Entropy[i]
    Diff_sum = sum(list(difference_coefficient.values()))
    entropy_weight = {}
    for i in important_features:
        entropy_weight[i] = difference_coefficient[i] / Diff_sum
    feature_weight = feature_weight[important_features]
    feature_weight = np.mat(feature_weight)
    weight = np.array(list(entropy_weight.values()))
    overall_merit = weight * feature_weight.T
    overall_merit = overall_merit.T
    overall_merit = np.array(overall_merit)
    overall_list = []
    for i in range(len(feature_weight)):
        overall_list.append(overall_merit[i][0])
    overall = pd.DataFrame({'eventid': data_id, 'overall': overall_list})
    overall = overall.sort_values(by=['overall'], ascending=False)
    overall.index = list(np.arange(len(data_norm)))
    data_norm = data_norm[important_features]
    overall = overall.sort_values(by=['eventid'], ascending=True)
    overall.index = list(np.arange(len(data_norm)))
    feature_names = list(entropy_weight.keys())
    entropy_weight = list(entropy_weight.values())
    norm_entropy_weight = []
    for sub_weight in entropy_weight:
        norm_entropy_weight.append(sub_weight / sum(entropy_weight))
    entropy_weight = dict(zip(feature_names, norm_entropy_weight))
    return (entropy_weight, overall)

print(entropy_weight)
code
16157889/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import warnings
import os
import numpy as np
import folium
from folium import plugins
from sklearn.preprocessing import MinMaxScaler
from copy import deepcopy
import math
import time
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import mixture

warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
all_data = pd.read_csv('../input/globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')
all_data = all_data[['eventid', 'iyear', 'imonth', 'iday', 'extended', 'country', 'region', 'vicinity', 'latitude', 'longitude', 'specificity', 'crit1', 'crit2', 'crit3', 'doubtterr', 'multiple', 'attacktype1', 'success', 'suicide', 'weaptype1', 'targtype1', 'nkill', 'nkillter', 'nwound', 'nwoundte', 'property', 'propextent', 'ishostkid', 'ransom', 'INT_LOG', 'INT_IDEO', 'INT_MISC', 'INT_ANY']]
all_data = all_data.dropna(subset=['latitude'])
all_data = all_data.dropna(subset=['longitude'])
all_data.shape
code
16157889/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import warnings
import pandas as pd
import warnings
import os
import numpy as np
import folium
from folium import plugins
from sklearn.preprocessing import MinMaxScaler
from copy import deepcopy
import math
import time
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import mixture

warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
all_data = pd.read_csv('../input/globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')
all_data = all_data[['eventid', 'iyear', 'imonth', 'iday', 'extended', 'country', 'region', 'vicinity', 'latitude', 'longitude', 'specificity', 'crit1', 'crit2', 'crit3', 'doubtterr', 'multiple', 'attacktype1', 'success', 'suicide', 'weaptype1', 'targtype1', 'nkill', 'nkillter', 'nwound', 'nwoundte', 'property', 'propextent', 'ishostkid', 'ransom', 'INT_LOG', 'INT_IDEO', 'INT_MISC', 'INT_ANY']]
all_data = all_data.dropna(subset=['latitude'])
all_data = all_data.dropna(subset=['longitude'])
all_data.shape
print('all_data memory usage: about {:.2f} GB'.format(all_data.memory_usage().sum() / 1024 ** 3))
code
122250672/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.columns
car.info()
code
122250672/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

lrmodel = LinearRegression()
lrmodel.fit(X_train, Y_train)
code
122250672/cell_7
[ "image_output_1.png" ]
import pandas as pd

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.head()
code
122250672/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.columns
car.isnull().sum()
car.replace({'Fuel_Type': {'Petrol': 0, 'Diesel': 1, 'CNG': 2}}, inplace=True)
car.replace({'Seller_Type': {'Dealer': 0, 'Individual': 1}}, inplace=True)
car.replace({'Transmission': {'Manual': 0, 'Automatic': 1}}, inplace=True)
corr = car.corr()
sns.heatmap(corr, annot=True, cmap='crest')
plt.show()
code
122250672/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.columns
car.isnull().sum()
car.replace({'Fuel_Type': {'Petrol': 0, 'Diesel': 1, 'CNG': 2}}, inplace=True)
car.replace({'Seller_Type': {'Dealer': 0, 'Individual': 1}}, inplace=True)
car.replace({'Transmission': {'Manual': 0, 'Automatic': 1}}, inplace=True)
corr = car.corr()
X = car.drop(['Car_Name', 'Selling_Price'], axis=1)
Y = car['Selling_Price']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
lrmodel = LinearRegression()
lrmodel.fit(X_train, Y_train)
training_data_prediction = lrmodel.predict(X_train)
Y_pred = lrmodel.predict(X_test)
sns.regplot(x=Y_test, y=Y_pred, data=car, scatter_kws={'color': 'green'}, line_kws={'color': 'blue'})
code
122250672/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.columns
code
122250672/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

car = pd.read_csv('/kaggle/input/car-15/cardata.csv')
car.columns
car.isnull().sum()
code
34124456/cell_20
[ "text_plain_output_1.png" ]
def printinfo(name, age):
    """This prints a passed info into this function"""
    return

def printinfo(name, age=35):
    """This prints a passed info into this function"""
    return

def printinfo(arg1, *vartuple):
    """This prints a variable passed arguments"""
    print('Output is: ')
    print(arg1)
    for var in vartuple:
        print(var)
    return

printinfo(10)
printinfo(70, 60, 50)
code
34124456/cell_11
[ "text_plain_output_1.png" ]
def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    print(str)
    return

printme()
code
34124456/cell_7
[ "text_plain_output_1.png" ]
def changeme(mylist):
    """This changes a passed list into this function"""
    print('Values inside the function before change: ', mylist)
    mylist[2] = 50
    print('Values inside the function after change: ', mylist)
    return

mylist = [10, 20, 30]
changeme(mylist)
print('Values outside the function: ', mylist)
code
34124456/cell_18
[ "text_plain_output_1.png" ]
def printinfo(name, age):
    """This prints a passed info into this function"""
    return

def printinfo(name, age=35):
    """This prints a passed info into this function"""
    print('Name: ', name)
    print('Age ', age)
    return

printinfo(age=50, name='miki')
printinfo(name='miki')
code
34124456/cell_28
[ "text_plain_output_1.png" ]
sum = lambda arg1, arg2: arg1 + arg2

def sum(arg1, arg2):
    total = arg1 + arg2
    return total

total = sum(10, 20)
total = 0

def sum(arg1, arg2):
    total = arg1 + arg2
    print('Inside the function local total : ', total)
    return total

sum(10, 20)
print('Outside the function global total : ', total)
code
34124456/cell_16
[ "text_plain_output_1.png" ]
def printinfo(name, age):
    """This prints a passed info into this function"""
    print('Name: ', name)
    print('Age ', age)
    return

printinfo(age=50, name='miki')
code
34124456/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    print(str)
    return

printme(str='My string')
code
34124456/cell_22
[ "text_plain_output_1.png" ]
sum = lambda arg1, arg2: arg1 + arg2

print('Value of total : ', sum(10, 20))
print('Value of total : ', sum(20, 20))
code
34124456/cell_12
[ "text_plain_output_1.png" ]
def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    print(str)
    return

printme('Hello World')
code
34124456/cell_5
[ "text_plain_output_1.png" ]
def printme(str):
    """This prints a passed string into this function"""
    return

def printme(str):
    """This prints a passed string into this function"""
    print(str)
    return

printme('This is first call to the user defined function!')
printme('Again second call to the same function')
code
130027592/cell_42
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

model_1 = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB())])
model_1.fit(train_sentences, train_label)
model_1_score = model_1.score(test_sentences, test_label)
model_1_score
model_1_preds = model_1.predict(test_sentences)
model_1_preds
code
130027592/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data.head()
code
130027592/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
fake_data.info()
code
130027592/cell_34
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import random

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
random_sen = random.choice(train_sentences)
random_sen
text_vectorizer([random_sen])
code
130027592/cell_33
[ "text_plain_output_1.png" ]
import random

random_sen = random.choice(train_sentences)
random_sen
code
130027592/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data = all_data.dropna()
all_data = all_data.sample(frac=1, random_state=42)
all_data.head()
code
130027592/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
real_data.head()
code
130027592/cell_40
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

model_1 = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB())])
model_1.fit(train_sentences, train_label)
code
130027592/cell_29
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
code
130027592/cell_26
[ "text_plain_output_1.png" ]
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
code
130027592/cell_48
[ "text_html_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
code
130027592/cell_41
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

model_1 = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB())])
model_1.fit(train_sentences, train_label)
model_1_score = model_1.score(test_sentences, test_label)
model_1_score
code
130027592/cell_54
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

def calculate_results(y_true, y_pred):
    model_accuracy = accuracy_score(y_true, y_pred) * 100
    model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    model_results = {'accuracy': model_accuracy, 'precision': model_precision, 'recall': model_recall, 'f1': model_f1}
    return model_results

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
model_2_preds = tf.squeeze(tf.round(model_2_probs))
model_2_preds[:20]
model_2_results = calculate_results(y_true=test_label, y_pred=model_2_preds)
model_2_results
code
130027592/cell_60
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
model_2_preds = tf.squeeze(tf.round(model_2_probs))
model_2_preds[:20]
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.LSTM(64, return_sequences=True)(x)
x = layers.LSTM(64)(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_3 = tf.keras.Model(inputs, outputs, name='LSTM_model')
model_3.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_3_history = model_3.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_3.summary()
code
130027592/cell_50
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
code
130027592/cell_52
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
code
130027592/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130027592/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
real_data.info()
code
130027592/cell_49
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
pd.DataFrame(model_2_history.history).plot()
code
130027592/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data = all_data.dropna()
sns.barplot(x=all_data['target'].unique(), y=all_data['target'].value_counts(), palette='viridis')
code
130027592/cell_51
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
code
130027592/cell_59
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
model_2_preds = tf.squeeze(tf.round(model_2_probs))
model_2_preds[:20]
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.LSTM(64, return_sequences=True)(x)
x = layers.LSTM(64)(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_3 = tf.keras.Model(inputs, outputs, name='LSTM_model')
model_3.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_3_history = model_3.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
pd.DataFrame(model_3_history.history).plot()
code
130027592/cell_58
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
model_2_preds = tf.squeeze(tf.round(model_2_probs))
model_2_preds[:20]
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.LSTM(64, return_sequences=True)(x)
x = layers.LSTM(64)(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_3 = tf.keras.Model(inputs, outputs, name='LSTM_model')
model_3.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_3_history = model_3.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
code
130027592/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
(min(sent_lens), max(sent_lens))
code
130027592/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
fake_data.head()
code
130027592/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data = all_data.dropna()
all_data.info()
code
130027592/cell_38
[ "text_plain_output_1.png" ]
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import random

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
random_sen = random.choice(train_sentences)
random_sen
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
print(embedding(text_vectorizer([random_sen])))
code
130027592/cell_3
[ "text_plain_output_1.png" ]
import tensorflow as tf
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from tensorflow.keras import layers
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import random
from keras.callbacks import EarlyStopping
code
130027592/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data = all_data.dropna()
all_data['target'].value_counts().hist()
code
130027592/cell_35
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
print(f'Most common words in the vocabulary: {news_vocab[:5]}')
print(f'Least common words in the vocabulary: {news_vocab[-5:]}')
code
130027592/cell_43
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

def calculate_results(y_true, y_pred):
    model_accuracy = accuracy_score(y_true, y_pred) * 100
    model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    model_results = {'accuracy': model_accuracy, 'precision': model_precision, 'recall': model_recall, 'f1': model_f1}
    return model_results

model_1 = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB())])
model_1.fit(train_sentences, train_label)
model_1_score = model_1.score(test_sentences, test_label)
model_1_score
model_1_preds = model_1.predict(test_sentences)
model_1_preds
model_1_results = calculate_results(y_true=test_label, y_pred=model_1_preds)
model_1_results
code
130027592/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

real_data = pd.read_csv('/kaggle/input/fake-news-football/real.csv')
fake_data = pd.read_csv('/kaggle/input/fake-news-football/fake.csv')
all_data = pd.concat([real_data, fake_data], ignore_index=True)
all_data.info()
code
130027592/cell_22
[ "text_plain_output_1.png" ]
(type(train_sentences), type(train_label))
code
130027592/cell_53
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import numpy as np  # linear algebra
import tensorflow as tf

sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
text_vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=output_sequence_length)
text_vectorizer.adapt(train_sentences)
news_vocab = text_vectorizer.get_vocabulary()
(print(f'Number of words in vocabulary: {len(news_vocab)}'),)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
inputs = layers.Input(shape=(1,), dtype=tf.string)
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.Conv1D(filters=64, kernel_size=5, strides=1, activation='relu', padding='valid')(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_2 = tf.keras.Model(inputs, outputs, name='conv1D_model')
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_2.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model_2_history = model_2.fit(train_sentences, train_label, validation_data=(test_sentences, test_label), batch_size=64, epochs=100, verbose=1, callbacks=[early_stopping])
model_2.summary()
model_2.evaluate(test_sentences, test_label)
model_2_probs = model_2.predict(test_sentences)
model_2_probs[:10]
model_2_preds = tf.squeeze(tf.round(model_2_probs))
model_2_preds[:20]
code
130027592/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
code
130027592/cell_37
[ "text_plain_output_1.png" ]
from tensorflow.keras import layers
import numpy as np # linear algebra
sent_lens = [len(sentence.split()) for sentence in train_sentences]
sent_lens[:10]
avg_len = np.mean(sent_lens)
avg_len
out_len_seq = np.percentile(sent_lens, 95)
out_len_seq
max_tokens = 65000
output_sequence_length = int(out_len_seq)
embedding = layers.Embedding(input_dim=max_tokens, output_dim=128, input_length=out_len_seq, embeddings_initializer='uniform')
embedding
code
122244194/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
checkpoint_dir = './check_v2'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
e_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
EPOCHS = 100
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback, e_stop])
code
122244194/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122244194/cell_7
[ "image_output_1.png" ]
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
checkpoint_dir = './check_v2'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
e_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
EPOCHS = 100
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback, e_stop])
model.summary()
code
122244194/cell_8
[ "text_plain_output_1.png" ]
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
checkpoint_dir = './check_v2'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
e_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
EPOCHS = 100
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback, e_stop])
losses = pd.DataFrame(history.history)
losses.plot()
code
122244194/cell_10
[ "text_plain_output_1.png" ]
from tensorflow.keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
checkpoint_dir = './check_v2'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
e_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
EPOCHS = 100
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback, e_stop])
losses = pd.DataFrame(history.history)
fig = plt.figure(figsize=(12, 8))
plt.subplot(2, 1, 1)
plt.plot(history.history['loss'])
plt.title('Modellverlust')
plt.ylabel('Verlust')
plt.xlabel('Epoche')
plt.legend(['Training', 'Validierung'], loc='upper right')
plt.subplot(2, 1, 2)
plt.plot(history.history['accuracy'])
plt.title('Modellgenauigkeit')
plt.ylabel('Genauigkeit')
plt.xlabel('Epoche')
plt.legend(['Training', 'Validierung'], loc='lower right')
plt.tight_layout()
plt.show()
code
122244194/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
checkpoint_dir = './check_v2'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
e_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
EPOCHS = 100
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback, e_stop])
model.summary()
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))

def generate_text(model, start_string, t, numGen):
    num_generate = numGen
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    text_generated = []
    temperature = t
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        predictions = tf.squeeze(predictions, 0)
        predictions = predictions / temperature
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2char[predicted_id])
    genText = start_string + ''.join(text_generated)
    return genText

print(generate_text(model, start_string=u'Was machst du?', t=0.5, numGen=200))
code
122244194/cell_5
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import tensorflow as tf
with open('/kaggle/input/german-harry-potter/Stein.txt', 'r') as datei:
    TEXT = datei.read()
with open('/kaggle/input/german-harry-potter/Orden.txt', 'r') as datei:
    Orden = datei.read()
with open('/kaggle/input/german-harry-potter/Kammer.txt', 'r') as datei:
    Kammer = datei.read()
with open('/kaggle/input/german-harry-potter/Heiligtuemer.txt', 'r') as datei:
    Heil = datei.read()
TEXT = TEXT + ' ' + Orden + ' ' + Kammer + ' ' + Heil
vocab = sorted(set(TEXT))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
TEXT_as_int = np.array([char2idx[c] for c in TEXT])
seq_length = 200
examples_per_epoch = len(TEXT) // (seq_length + 1)
char_dataset = tf.data.Dataset.from_tensor_slices(TEXT_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_TEXT = chunk[:-1]
    target_TEXT = chunk[1:]
    return (input_TEXT, target_TEXT)

dataset = sequences.map(split_input_target)
BATCH_SIZE = 128
BUFFER_SIZE = 1300
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
vocab_size = len(vocab)
embedding_dim = 512
rnn_units = 1024
rnn_units2 = 512

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.GRU(rnn_units2, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'), tf.keras.layers.Dense(vocab_size)])
    return model

model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, '# (batch_size, sequence_length, vocab_size)')
code
33108213/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
sns.distplot(ted_data['views'])
code
33108213/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
code
33108213/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
ted_data.head()
code
33108213/cell_2
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.head()
code
33108213/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
sns.distplot(ted_data[ted_data['duration'] < 3000]['duration'])
code
33108213/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33108213/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
import seaborn as sns
ax = sns.barplot(x='duration', y='main_speaker', data=ted_data.sort_values('duration', ascending=False)[:20])
code
33108213/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
ax = sns.barplot(x='views', y='main_speaker', data=ted_data.sort_values('views', ascending=False)[:20])
code
33108213/cell_16
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
ted_data[['title', 'main_speaker', 'comments', 'views', 'duration']].sort_values('comments', ascending=False).head(20)
code
33108213/cell_3
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
code
33108213/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as stats
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
# lets check if there is any correlation between views and duration
import scipy.stats as stats
ax = sns.jointplot(x='views', y='duration', data=ted_data)
ax.annotate(stats.pearsonr)  # shows the p values
ax = sns.jointplot(x='views', y='comments', data=ted_data)
ax.annotate(stats.pearsonr)
code
33108213/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
sns.distplot(ted_data[ted_data['views'] < 5000000.0]['views'])
code
33108213/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as stats
import seaborn as sns
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns

def date_convert(x):
    return pd.to_datetime(x, unit='s')

ted_data['film_date'] = ted_data['film_date'].apply(date_convert)
ted_data['published_date'] = ted_data['published_date'].apply(date_convert)
# lets check who talked the most, top 20
import seaborn as sns
ax = sns.barplot(x="duration", y="main_speaker", data=ted_data.sort_values('duration', ascending=False)[:20])
# lets check who got the most views, top 20
ax = sns.barplot(x="views", y="main_speaker", data=ted_data.sort_values('views', ascending=False)[:20])
# lets check if there is any correlation between views and duration
import scipy.stats as stats
ax = sns.jointplot(x='views', y='duration', data=ted_data)
ax.annotate(stats.pearsonr)
code
33108213/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ted_data = pd.read_csv('/kaggle/input/ted-talks/ted_main.csv')
ted_data.isnull().sum()
ted_data.dtypes
ted_data = ted_data.drop(['name'], axis=1)
ted_data.columns
code
128046727/cell_42
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

cataract_df = data_df.loc[data_df.cataract == 1]
show_wordcloud(cataract_df['left_diagnosys'], title='Prevalent words in left eye diagnosys for cataract')
code
128046727/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
!pip install openpyxl
code
128046727/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

plot_count('sex', 'Sex', data_df, size=2)
code
128046727/cell_57
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import glob
import imageio
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

cataract_df = data_df.loc[data_df.cataract == 1]

def plot_feature_distribution_grouped(feature, title, df, hue, size=4):
    plt.figure(figsize=(size*5,size*2))
    plt.title(title)
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    g = sns.countplot(df[feature], hue=df[hue], palette='Set3')
    plt.xlabel(feature)
    plt.legend()
    plt.show()

import imageio
IMAGE_PATH = "/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/Training Images"

def show_images(df, title="Diagnosys", eye_exam="left_fundus"):
    print(f"{title}; eye exam: {eye_exam}")
    f, ax = plt.subplots(4,4, figsize=(16,16))
    for i,idx in enumerate(df.index):
        dd = df.iloc[idx]
        image_name = dd[eye_exam]
        image_path = os.path.join(IMAGE_PATH, image_name)
        img_data = imageio.imread(image_path)
        ax[i//4, i%4].imshow(img_data)
        ax[i//4, i%4].axis('off')
    plt.show()

df = data_df.loc[(data_df.cataract == 1) & (data_df.left_diagnosys == 'cataract')].sample(16).reset_index()
df = data_df.loc[(data_df.cataract == 1) & (data_df.right_diagnosys == 'cataract')].sample(16).reset_index()
show_images(df, title='Right eye with cataract', eye_exam='right_fundus')
code
128046727/cell_56
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import glob
import imageio
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

cataract_df = data_df.loc[data_df.cataract == 1]

def plot_feature_distribution_grouped(feature, title, df, hue, size=4):
    plt.figure(figsize=(size*5,size*2))
    plt.title(title)
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    g = sns.countplot(df[feature], hue=df[hue], palette='Set3')
    plt.xlabel(feature)
    plt.legend()
    plt.show()

import imageio
IMAGE_PATH = "/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/Training Images"

def show_images(df, title="Diagnosys", eye_exam="left_fundus"):
    print(f"{title}; eye exam: {eye_exam}")
    f, ax = plt.subplots(4,4, figsize=(16,16))
    for i,idx in enumerate(df.index):
        dd = df.iloc[idx]
        image_name = dd[eye_exam]
        image_path = os.path.join(IMAGE_PATH, image_name)
        img_data = imageio.imread(image_path)
        ax[i//4, i%4].imshow(img_data)
        ax[i//4, i%4].axis('off')
    plt.show()

df = data_df.loc[(data_df.cataract == 1) & (data_df.left_diagnosys == 'cataract')].sample(16).reset_index()
show_images(df, title='Left eye with cataract', eye_exam='left_fundus')
code
128046727/cell_30
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

plot_count('amd', 'AMD', data_df, size=2)
code
128046727/cell_33
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

plot_count('other', 'Other', data_df, size=2)
code
128046727/cell_20
[ "text_html_output_1.png" ]
import glob
import os
print(f"train images: {len(os.listdir('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Training Images'))}")
print(f"test images: {len(os.listdir('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Testing Images'))}")
print(f"train images - left eye: {len(glob.glob('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Training Images//*_left.jpg'))}")
print(f"train images - right eye: {len(glob.glob('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Training Images//*_right.jpg'))}")
print(f"test images - left eye: {len(glob.glob('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Testing Images//*_left.jpg'))}")
print(f"test images - right eye: {len(glob.glob('//kaggle//input//ocular-disease-recognition-odir5k//ODIR-5K//ODIR-5K//Testing Images//*_right.jpg'))}")
code
128046727/cell_40
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

show_wordcloud(data_df['left_diagnosys'], title='Prevalent words in right eye diagnosys')
code
128046727/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

plot_count('cataract', 'Cataract', data_df, size=2)
code
128046727/cell_39
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

show_wordcloud(data_df['left_diagnosys'], title='Prevalent words in left eye diagnosys')
code
128046727/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

plot_count('normal', 'Normal', data_df, size=2)
code
128046727/cell_48
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

cataract_df = data_df.loc[data_df.cataract == 1]

def plot_feature_distribution_grouped(feature, title, df, hue, size=4):
    plt.figure(figsize=(size*5,size*2))
    plt.title(title)
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    g = sns.countplot(df[feature], hue=df[hue], palette='Set3')
    plt.xlabel(feature)
    plt.legend()
    plt.show()

plot_feature_distribution_grouped('sex', 'Cataract diagnosys grouped by sex', data_df, 'cataract', size=2)
code
128046727/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.head()
code
128046727/cell_50
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_excel('/kaggle/input/ocular-disease-recognition-odir5k/ODIR-5K/ODIR-5K/data.xlsx', engine='openpyxl')
data_df.columns = ['id', 'age', 'sex', 'left_fundus', 'right_fundus', 'left_diagnosys', 'right_diagnosys', 'normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'other']

def plot_count(feature, title, df, size=1, show_all=False):
    f, ax = plt.subplots(1,1, figsize=(4*size,4))
    total = float(len(df))
    if show_all:
        g = sns.countplot(df[feature], palette='Set3')
        g.set_title("{} distribution".format(title))
    else:
        g = sns.countplot(df[feature], order = df[feature].value_counts().index[:20], palette='Set3')
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2., height + 0.2, '{:1.2f}%'.format(100*height/total), ha="center")
    g.set_title("Number and percentage of {}".format(title))
    plt.show()

stopwords = set(STOPWORDS)

def show_wordcloud(data, title = None):
    wordcloud = WordCloud(
        background_color='white', stopwords=stopwords, max_words=40,
        max_font_size=40, scale=3, random_state=1,
    ).generate(str(data))
    fig = plt.figure(1, figsize=(12,10))
    plt.axis('off')
    if title:
        fig.suptitle(title, fontsize=20)
        fig.subplots_adjust(top=2.3)
    plt.imshow(wordcloud)
    plt.show()

cataract_df = data_df.loc[data_df.cataract == 1]

def plot_feature_distribution_grouped(feature, title, df, hue, size=4):
    plt.figure(figsize=(size*5,size*2))
    plt.title(title)
    if(size > 2):
        plt.xticks(rotation=90, size=8)
    g = sns.countplot(df[feature], hue=df[hue], palette='Set3')
    plt.xlabel(feature)
    plt.legend()
    plt.show()

plot_feature_distribution_grouped('sex', 'Hypertension diagnosys grouped by sex', data_df, 'hypertension', size=2)
code