Dataset schema (columns and value lengths):
  path             : string        (lengths 13–17)
  screenshot_names : sequence      (lengths 1–873)
  code             : string        (lengths 0–40.4k)
  cell_type        : string class  (1 value)
130007285/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.head()
code
130007285/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
code
130007285/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
Avg_delay_min.head()
code
130007285/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
Avg_ticket_price.head()
code
130007285/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df = df.dropna()  # dropna() returns a new frame; assign it so the rows are actually dropped
df.head()
code
130007285/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
sorted_df.head()
code
130007285/cell_22
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean()
Avg_delay_min = df.groupby('Delay Minutes').mean()
df = df.dropna()  # dropna() returns a new frame; assign it so the rows are actually dropped
max_value = np.max(df['Ticket Price'])
max_value
code
130007285/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
selected_columns = ['Departure City', 'Arrival City', 'Flight Duration', 'Delay Minutes', 'Booking Class']
df_selected = df[selected_columns]
df_selected
code
130007285/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
# combine both conditions; assigning filtered_df twice would discard the first filter
filtered_df = df[(df['Delay Minutes'] > 60) & (df['Churned'] == False)]
filtered_df.head()
code
130007285/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.info()
code
17143749/cell_13
[ "text_plain_output_2.png" ]
import multiprocessing as mp  # assumed: `mp` refers to the multiprocessing module
import pandas as pd
from tqdm import tqdm  # assumed: tqdm progress bars, as used below

chunk_iter = _smallstruct.groupby(['molecule_name'])
pool = mp.Pool(4)
funclist = []
for df in tqdm(chunk_iter):
    # each groupby item is a (key, frame) tuple; the worker gets the frame
    f = pool.apply_async(compute_all_yukawa, [df[1]])
    funclist.append(f)
result = []
for f in tqdm(funclist):
    result.append(f.get(timeout=120))
smallstruct2 = pd.concat(result)
code
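The fan-out above can also be written with Pool.map and a context manager; a minimal sketch under the same assumptions (compute_all_yukawa is picklable and _smallstruct is the frame used in the cell above):

import multiprocessing as mp

import pandas as pd

# Minimal sketch: same per-molecule fan-out as above, using Pool.map.
# Assumes compute_all_yukawa and _smallstruct as in the cell above.
def run_parallel_yukawa(struct_df, n_workers=4):
    groups = [group for _, group in struct_df.groupby('molecule_name')]
    with mp.Pool(n_workers) as pool:
        results = pool.map(compute_all_yukawa, groups)
    return pd.concat(results)

# usage (assumed): smallstruct2 = run_parallel_yukawa(_smallstruct)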
17143749/cell_11
[ "text_html_output_1.png" ]
smallstruct1.head(10)
code
17143749/cell_16
[ "text_plain_output_1.png" ]
import numpy as np

def compute_all_yukawa(x):
    return x.apply(compute_yukawa_matrix, axis=1, x2=x)

def compute_yukawa_matrix(x, x2):
    # all atoms in the molecule except the current one
    notatom = x2[x2.atom_index != x['atom_index']].reset_index(drop=True)
    atom = x[['x', 'y', 'z']]
    charge = x[['nuclear_charge']]
    # Euclidean distance from the current atom to every neighbour
    notatom['dist'] = ((notatom[['x', 'y', 'z']].values - atom.values) ** 2).sum(axis=1)
    notatom['dist'] = np.sqrt(notatom['dist'].astype(np.float32))
    # screened (Yukawa-style) pairwise term: Z_i * Z_j * exp(-2 d / d_max) / d
    notatom['dist'] = charge.values * notatom[['nuclear_charge']].values.reshape(-1) * np.exp(-2 * notatom['dist'] / notatom['dist'].max()) / notatom['dist']
    # sort within each element type, then keep at most five terms per type
    s = notatom.groupby('atom')['dist'].transform(lambda x: x.sort_values(ascending=False))
    index0, index1 = ([], [])
    for i in notatom.atom.unique():
        for j in range(notatom[notatom.atom == i].shape[0]):
            if j < 5:
                index1.append('dist_' + i + '_' + str(j))
            index0.append(j)
    s.index = index0
    s = s[s.index < 5]
    s.index = index1
    return s

np.allclose(smallstruct2.fillna(0), smallstruct1.fillna(0))
code
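For reference, the pairwise term computed inside compute_yukawa_matrix reads as a screened, Yukawa-style Coulomb interaction, with d_max the largest neighbour distance for the current atom:

% term computed per neighbour j of atom i in compute_yukawa_matrix above
V_{ij} = \frac{Z_i \, Z_j \, e^{-2\, d_{ij} / d_{\max}}}{d_{ij}}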
17143749/cell_14
[ "text_html_output_1.png" ]
smallstruct2.head(10)
code
17143749/cell_10
[ "text_plain_output_1.png" ]
smallstruct1 = _smallstruct.groupby('molecule_name').apply(compute_all_yukawa)
code
50241935/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
code
50241935/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
monetary.head(5)
code
50241935/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.info()
data.describe()
code
50241935/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
master.head(5)
code
50241935/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
RFM = k.merge(monetary, on='CustomerID')
RFM = RFM.merge(df, on='CustomerID')
Q1 = RFM.Amount.quantile(0.25)
Q3 = RFM.Amount.quantile(0.75)
IQR = Q3 - Q1
RFM = RFM[(RFM.Amount >= Q1 - 1.5 * IQR) & (RFM.Amount <= Q3 + 1.5 * IQR)]
RFM_norm1 = RFM.drop('CustomerID', axis=1)
RFM_norm1.Recency = RFM_norm1.Recency.dt.days
standard_scaler = StandardScaler()
RFM_norm1 = standard_scaler.fit_transform(RFM_norm1)
RFM_norm1 = pd.DataFrame(RFM_norm1)
RFM_norm1.columns = ['Frequency', 'Amount', 'Recency']
model_clus5 = KMeans(n_clusters=5, max_iter=50)
model_clus5.fit(RFM_norm1)
code
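Once model_clus5 is fitted, the cluster assignments live in labels_; a minimal sketch (assuming the RFM and RFM_norm1 frames from the cell above, which share row order) of attaching them back to the customers:

# Minimal sketch: attach the fitted KMeans labels back to the RFM table.
# Assumes model_clus5, RFM and RFM_norm1 from the cell above.
RFM_clustered = RFM.copy()
RFM_clustered['ClusterID'] = model_clus5.labels_
RFM_clustered.groupby('ClusterID')['Amount'].mean()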
50241935/cell_1
[ "text_plain_output_1.png" ]
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.cluster.hierarchy import dendrogram, cut_tree, linkage
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50241935/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
code
50241935/cell_18
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
RFM = k.merge(monetary, on='CustomerID')
RFM = RFM.merge(df, on='CustomerID')
Q1 = RFM.Amount.quantile(0.25)
Q3 = RFM.Amount.quantile(0.75)
IQR = Q3 - Q1
RFM = RFM[(RFM.Amount >= Q1 - 1.5 * IQR) & (RFM.Amount <= Q3 + 1.5 * IQR)]
RFM_norm1 = RFM.drop('CustomerID', axis=1)
RFM_norm1.Recency = RFM_norm1.Recency.dt.days
standard_scaler = StandardScaler()
RFM_norm1 = standard_scaler.fit_transform(RFM_norm1)
RFM_norm1 = pd.DataFrame(RFM_norm1)
RFM_norm1.columns = ['Frequency', 'Amount', 'Recency']
RFM_norm1.head(5)
code
50241935/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
amount.head(5)
code
50241935/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
RFM = k.merge(monetary, on='CustomerID')
RFM = RFM.merge(df, on='CustomerID')
RFM.head(5)
code
50241935/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
RFM = k.merge(monetary, on='CustomerID')
RFM = RFM.merge(df, on='CustomerID')
plt.boxplot(RFM.Amount)
Q1 = RFM.Amount.quantile(0.25)
Q3 = RFM.Amount.quantile(0.75)
IQR = Q3 - Q1
RFM = RFM[(RFM.Amount >= Q1 - 1.5 * IQR) & (RFM.Amount <= Q3 + 1.5 * IQR)]
code
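The filter at the end of this cell is the standard 1.5 × IQR outlier rule; written out, a row is kept when its Amount value x satisfies:

% 1.5*IQR outlier rule applied to the Amount column above
Q_1 - 1.5\,\mathrm{IQR} \;\le\; x \;\le\; Q_3 + 1.5\,\mathrm{IQR},
\qquad \mathrm{IQR} = Q_3 - Q_1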
50241935/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.head(5)
code
50241935/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
master = monetary.merge(k, on='CustomerID', how='inner')
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
RFM = k.merge(monetary, on='CustomerID')
RFM = RFM.merge(df, on='CustomerID')
Q1 = RFM.Amount.quantile(0.25)
Q3 = RFM.Amount.quantile(0.75)
IQR = Q3 - Q1
RFM = RFM[(RFM.Amount >= Q1 - 1.5 * IQR) & (RFM.Amount <= Q3 + 1.5 * IQR)]
RFM.head(5)
code
50241935/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
a = recency.groupby('CustomerID')
a['diff'].min()
df = pd.DataFrame(recency.groupby(['CustomerID', 'diff']).min())
df = df.reset_index()
df = df.drop('InvoiceDate', axis=1)
df = df.rename(columns={'diff': 'Recency'})
df.head(5)
code
50241935/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
k.head(5)
code
50241935/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
order_wise = data.dropna()
order_wise.shape
order_wise.isnull().sum()
amount = pd.DataFrame(order_wise['Quantity'] * order_wise['UnitPrice'], columns=['Amount'])
order_wise = pd.concat(objs=[order_wise, amount], axis=1, ignore_index=False)
monetary = order_wise.groupby('CustomerID').Amount.sum()
monetary = monetary.reset_index()
frequency = order_wise[['CustomerID', 'InvoiceNo']]
k = frequency.groupby('CustomerID').InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ['CustomerID', 'Frequency']
recency = order_wise[['CustomerID', 'InvoiceDate']].copy()  # .copy() avoids a SettingWithCopyWarning below
maximum = max(recency.InvoiceDate)
maximum = maximum + pd.DateOffset(days=1)
recency['diff'] = maximum - recency.InvoiceDate
recency.head(5)
code
50241935/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('../input/online-retail-data-set-from-uci-ml-repo/Online Retail.xlsx')
data['InvoiceDate'] = pd.to_datetime(data['InvoiceDate'], format='%d-%m-%Y %H:%M')
data.shape
data.isnull().sum() * 100 / data.shape[0]
code
105210534/cell_42
[ "image_output_1.png" ]
l = [('1', 1), ('2', 2), ('3', 3)]
max(l)  # tuples compare element-wise (lexicographically), so this returns ('3', 3)
code
105210534/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
# note: this also reads the *train* file; mitbih_test.csv may have been intended
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.figure(figsize=[12, 5.01])
plt.pie(sorted_counts, labels=sorted_counts.index, startangle=90, counterclock=False)
plt.axis('square')
code
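The five near-identical resample calls above could be folded into one helper; a minimal sketch, assuming the same layout (class labels in column 187) and per-class target size. Unlike the cell above, it also resamples the majority class with replacement:

import pandas as pd
from sklearn.utils import resample

# Minimal sketch: bring every class in `frame` (labels in column 187) to `n` rows.
# random_state pins reproducibility, as in the cell above.
def balance_classes(frame, n=20000, label_col=187, random_state=0):
    parts = []
    for label in sorted(frame[label_col].unique()):
        part = frame[frame[label_col] == label]
        parts.append(resample(part, replace=True, n_samples=n, random_state=random_state))
    return pd.concat(parts)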
105210534/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
for i in np.arange(len(percentages)):
    print(f'the percent of {int(train_target.index[i])} is : {np.round(percentages[i], 2)} %')
code
105210534/cell_25
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
# note: this also reads the *train* file; mitbih_test.csv may have been intended
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_target = test_data[187].value_counts()
test_target
code
105210534/cell_34
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.axis('square')
test_target = test_data[187].value_counts()
plt.bar(train_target.index, train_target.values, color='green')
code
105210534/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.axis('square')
test_target = test_data[187].value_counts()
np.random.seed(2020)
sample2 = np.random.choice(test_data.shape[0], 200, replace=False)
subset2 = test_data.loc[sample2]
percentages = [count / subset2.shape[0] * 100 for count in subset2[187].value_counts()]
percentages[0]
target1 = test_data[test_data[187] == 1]
target2 = test_data[test_data[187] == 2]
target3 = test_data[test_data[187] == 3]
target4 = test_data[test_data[187] == 4]
target0 = test_data[test_data[187] == 0].sample(n=2000, random_state=0)
target1_sample = resample(target1, replace=True, n_samples=2000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=2000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=2000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=2000, random_state=0)
test_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
test_target = test_data[187].value_counts()
test_target
code
105210534/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
np.random.seed(2020)
sample2 = np.random.choice(test_data.shape[0], 200, replace=False)
subset2 = test_data.loc[sample2]
percentages = [count / subset2.shape[0] * 100 for count in subset2[187].value_counts()]
percentages[0]
# label each percentage with its class; test_data.index[i] would give a row
# index, not the class label
class_labels = subset2[187].value_counts().index
for i in np.arange(len(percentages)):
    print(f'the percent of {int(class_labels[i])} is : {np.round(percentages[i], 2)} %')
code
105210534/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.axis('square')
test_target = test_data[187].value_counts()
plt.bar(test_target.index, test_target.values, color=sb.color_palette()[0])
code
105210534/cell_41
[ "image_output_1.png" ]
l = [('1', 1), ('2', 2), ('3', 3)]
# max() on tuples compares element-wise, picking ('3', 3); any non-empty tuple
# is truthy, so the branch always runs
if max(l):
    print(l)
code
105210534/cell_2
[ "image_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105210534/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
subset
code
105210534/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
plt.bar(train_target.index, train_target.values, color='green')
code
105210534/cell_50
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from scipy import stats
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.axis('square')
test_target = test_data[187].value_counts()
np.random.seed(2020)
sample2 = np.random.choice(test_data.shape[0], 200, replace=False)
subset2 = test_data.loc[sample2]
percentages = [count / subset2.shape[0] * 100 for count in subset2[187].value_counts()]
percentages[0]
sorted_counts = train_data[187].value_counts()
plt.axis('square')
pearson_coef, p_value = stats.pearsonr(train_data[1], train_data[187])
effictive_list = []
for i in train_data.columns:
    if i != 187:
        pearson_coef, p_value = stats.pearsonr(train_data[i], train_data[187])
        effictive_list.append((f'column number : {i}', f'Pearson Correlation {pearson_coef}', f'P-value : {p_value}'))
    else:
        break

def add_guassian_noise(signal):
    noise = np.random.normal(0, 0.05, 186)
    return signal + noise

noise_data = add_guassian_noise(train_data.iloc[0, :186])
plt.figure(figsize=[12, 4.02])
plt.plot(train_data.iloc[0, :186], color='green')
plt.title('data without noise')
plt.xlabel('ECG in mili volts')
plt.ylabel('time in seconds')
code
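The cell computes noise_data but only plots the clean signal; a minimal sketch (assuming train_data and add_guassian_noise as defined above) of overlaying the two for comparison:

import matplotlib.pyplot as plt

# Minimal sketch: clean vs. noise-augmented version of the first heartbeat.
# Assumes train_data and add_guassian_noise from the cell above.
clean = train_data.iloc[0, :186]
noisy = add_guassian_noise(clean)
plt.figure(figsize=[12, 4])
plt.plot(clean, color='green', label='clean')
plt.plot(noisy, color='red', alpha=0.6, label='with gaussian noise')
plt.xlabel('sample index')
plt.ylabel('ECG amplitude')
plt.legend()
plt.show()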
105210534/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
train_target
code
105210534/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
train_target
code
105210534/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
np.random.seed(2020)
sample2 = np.random.choice(test_data.shape[0], 200, replace=False)
subset2 = test_data.loc[sample2]
percentages = [count / subset2.shape[0] * 100 for count in subset2[187].value_counts()]
percentages[0]
code
105210534/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
plt.bar(train_target.index, train_target.values, color=sb.color_palette()[4])
code
105210534/cell_46
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
pearson_coef, p_value = stats.pearsonr(train_data[1], train_data[187])
effictive_list = []
for i in train_data.columns:
    if i != 187:
        pearson_coef, p_value = stats.pearsonr(train_data[i], train_data[187])
        effictive_list.append((f'column number : {i}', f'Pearson Correlation {pearson_coef}', f'P-value : {p_value}'))
    else:
        break
print('*' * 75)
# note: max() on these tuples of strings compares lexicographically, not by
# correlation strength (see the sketch after this cell for a numeric version)
print(max(effictive_list))
print('*' * 75)
code
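Since max(effictive_list) compares tuples of formatted strings, it picks the lexicographically largest label rather than the strongest correlation. A minimal sketch of ranking by |r| instead, keeping the values numeric (an assumed variant, not the notebook's original output):

# Minimal sketch: keep (column, r, p) numeric and pick the strongest |r|.
# Assumes train_data and stats.pearsonr as in the cell above.
scored = []
for col in train_data.columns:
    if col == 187:
        break
    r, p = stats.pearsonr(train_data[col], train_data[187])
    scored.append((col, r, p))
best_col, best_r, best_p = max(scored, key=lambda t: abs(t[1]))
print(f'strongest correlation: column {best_col}, r={best_r:.4f}, p={best_p:.3g}')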
105210534/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
sb.countplot(data=subset, y=187, color=base_color, order=type_orderion)
plt.xticks(rotation=90)
code
105210534/cell_12
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
code
105210534/cell_36
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.utils import resample

train_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
test_data = pd.read_csv('/kaggle/input/heartbeat/mitbih_train.csv', header=None)
train_target = train_data[187].value_counts()
np.random.seed(2018)
sample = np.random.choice(train_data.shape[0], 200, replace=False)
subset = train_data.loc[sample]
percentages = [count / subset.shape[0] * 100 for count in subset[187].value_counts()]
percentages[0]
base_color = sb.color_palette()[0]
type_orderion = subset[187].value_counts().index
plt.xticks(rotation=90)
target1 = train_data[train_data[187] == 1]
target2 = train_data[train_data[187] == 2]
target3 = train_data[train_data[187] == 3]
target4 = train_data[train_data[187] == 4]
target0 = train_data[train_data[187] == 0].sample(n=20000, random_state=42)
target1_sample = resample(target1, replace=True, n_samples=20000, random_state=0)
target2_sample = resample(target2, replace=True, n_samples=20000, random_state=0)
target3_sample = resample(target3, replace=True, n_samples=20000, random_state=0)
target4_sample = resample(target4, replace=True, n_samples=20000, random_state=0)
train_data = pd.concat([target0, target1_sample, target2_sample, target3_sample, target4_sample])
train_target = train_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.axis('square')
test_target = test_data[187].value_counts()
sorted_counts = train_data[187].value_counts()
plt.figure(figsize=[12, 5.01])
plt.pie(sorted_counts, labels=sorted_counts.index, startangle=90, counterclock=False)
plt.axis('square')
code
48165941/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_link = pd.read_csv('../input/dataset/datasets.csv')
df = df_link.copy()
df = df.dropna(axis=0, subset=['Source'])
df.head()
code
48165941/cell_6
[ "image_output_1.png" ]
from skimage import io import matplotlib.pyplot as plt from skimage import io image = io.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0245.JPG') plt.imshow(image)
code
48165941/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
48165941/cell_7
[ "image_output_1.png" ]
import os

import numpy as np
import pandas as pd

list = os.listdir('../input/data-image/BurgosPuertaDeLaCoroneria')  # note: shadows the built-in `list`
for i in range(len(list)):
    print(list[i])
code
48165941/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import os

import cv2
import imageio
import imutils
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import color, data, io
from skimage.transform import downscale_local_mean, rescale, resize

image = io.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0245.JPG')
list = os.listdir('../input/data-image/BurgosPuertaDeLaCoroneria')

def process(filename):
    image = mpimg.imread('../input/data-image/BurgosPuertaDeLaCoroneria/' + filename)

for file in list:
    process(file)

img = cv2.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0247.JPG')
image = color.rgb2gray(img)
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
image_resized = resize(image, (image.shape[0] // 4, image.shape[1] // 4), anti_aliasing=True)
image_downscaled = downscale_local_mean(image, (4, 3))

fig, axes = plt.subplots(nrows=2, ncols=2)
ax = axes.ravel()
ax[0].imshow(image, cmap='gray')
ax[0].set_title("Original image")
ax[1].imshow(image_rescaled, cmap='gray')
ax[1].set_title("Rescaled image (aliasing)")
ax[2].imshow(image_resized, cmap='gray')
ax[2].set_title("Resized image (no aliasing)")
ax[3].imshow(image_downscaled, cmap='gray')
ax[3].set_title("Downscaled image (no aliasing)")
ax[0].set_xlim(0, 512)
ax[0].set_ylim(512, 0)
plt.tight_layout()
plt.show()

cv2.ocl.setUseOpenCL(False)

# read images and transform them to grayscale
# Make sure that the train image is the image that will be transformed
trainImg = imageio.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0247.JPG')
trainImg_gray = cv2.cvtColor(trainImg, cv2.COLOR_RGB2GRAY)
queryImg = imageio.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0246.JPG')
# Opencv defines the color channel in the order BGR.
# Transform it to RGB to be compatible to matplotlib
queryImg_gray = cv2.cvtColor(queryImg, cv2.COLOR_RGB2GRAY)

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, constrained_layout=False, figsize=(16, 9))
ax1.imshow(queryImg, cmap="gray")
ax1.set_xlabel("Query image", fontsize=14)
ax2.imshow(trainImg, cmap="gray")
ax2.set_xlabel("Train image (Image to be transformed)", fontsize=14)
plt.show()

def detectAndDescribe(image, method=None):
    """
    Compute key points and feature descriptors using an specific method
    """
    assert method is not None, "You need to define a feature detection method. Values are: 'sift', 'surf'"
    if method == 'sift':
        descriptor = cv2.xfeatures2d.SIFT_create()
    elif method == 'surf':
        descriptor = cv2.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()
    kps, features = descriptor.detectAndCompute(image, None)
    return (kps, features)

# kpsA/kpsB were used below without being computed; 'orb' is an assumed choice
# (it ships with stock OpenCV, unlike the patented SIFT/SURF builds)
kpsA, featuresA = detectAndDescribe(trainImg_gray, method='orb')
kpsB, featuresB = detectAndDescribe(queryImg_gray, method='orb')

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 8), constrained_layout=False)
ax1.imshow(cv2.drawKeypoints(trainImg_gray, kpsA, None, color=(0, 255, 0)))
ax1.set_xlabel('', fontsize=14)
ax2.imshow(cv2.drawKeypoints(queryImg_gray, kpsB, None, color=(0, 255, 0)))
ax2.set_xlabel('(b)', fontsize=14)
plt.show()
code
48165941/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import os

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import io

image = io.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0245.JPG')
list = os.listdir('../input/data-image/BurgosPuertaDeLaCoroneria')

def process(filename):
    image = mpimg.imread('../input/data-image/BurgosPuertaDeLaCoroneria/' + filename)
    plt.figure()
    plt.imshow(image)

for file in list:
    process(file)
code
48165941/cell_15
[ "text_plain_output_1.png" ]
import os

import cv2
import imageio
import imutils
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import color, data, io
from skimage.transform import downscale_local_mean, rescale, resize

image = io.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0245.JPG')
list = os.listdir('../input/data-image/BurgosPuertaDeLaCoroneria')

def process(filename):
    image = mpimg.imread('../input/data-image/BurgosPuertaDeLaCoroneria/' + filename)

for file in list:
    process(file)

img = cv2.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0247.JPG')
image = color.rgb2gray(img)
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
image_resized = resize(image, (image.shape[0] // 4, image.shape[1] // 4), anti_aliasing=True)
image_downscaled = downscale_local_mean(image, (4, 3))

fig, axes = plt.subplots(nrows=2, ncols=2)
ax = axes.ravel()
ax[0].imshow(image, cmap='gray')
ax[0].set_title("Original image")
ax[1].imshow(image_rescaled, cmap='gray')
ax[1].set_title("Rescaled image (aliasing)")
ax[2].imshow(image_resized, cmap='gray')
ax[2].set_title("Resized image (no aliasing)")
ax[3].imshow(image_downscaled, cmap='gray')
ax[3].set_title("Downscaled image (no aliasing)")
ax[0].set_xlim(0, 512)
ax[0].set_ylim(512, 0)
plt.tight_layout()
plt.show()

cv2.ocl.setUseOpenCL(False)
trainImg = imageio.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0247.JPG')
trainImg_gray = cv2.cvtColor(trainImg, cv2.COLOR_RGB2GRAY)
queryImg = imageio.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0246.JPG')
queryImg_gray = cv2.cvtColor(queryImg, cv2.COLOR_RGB2GRAY)

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, constrained_layout=False, figsize=(16, 9))
ax1.imshow(queryImg, cmap='gray')
ax1.set_xlabel('Query image', fontsize=14)
ax2.imshow(trainImg, cmap='gray')
ax2.set_xlabel('Train image (Image to be transformed)', fontsize=14)
plt.show()
code
48165941/cell_3
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_link = pd.read_csv('../input/dataset/datasets.csv')
df_link.head()
code
48165941/cell_10
[ "text_html_output_1.png" ]
import os

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import color, data, io
from skimage.transform import downscale_local_mean, rescale, resize

image = io.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0245.JPG')
list = os.listdir('../input/data-image/BurgosPuertaDeLaCoroneria')

def process(filename):
    image = mpimg.imread('../input/data-image/BurgosPuertaDeLaCoroneria/' + filename)

for file in list:
    process(file)

img = cv2.imread('../input/data-image/BurgosPuertaDeLaCoroneria/DPP_0247.JPG')
image = color.rgb2gray(img)
image_rescaled = rescale(image, 0.25, anti_aliasing=False)
image_resized = resize(image, (image.shape[0] // 4, image.shape[1] // 4), anti_aliasing=True)
image_downscaled = downscale_local_mean(image, (4, 3))

fig, axes = plt.subplots(nrows=2, ncols=2)
ax = axes.ravel()
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original image')
ax[1].imshow(image_rescaled, cmap='gray')
ax[1].set_title('Rescaled image (aliasing)')
ax[2].imshow(image_resized, cmap='gray')
ax[2].set_title('Resized image (no aliasing)')
ax[3].imshow(image_downscaled, cmap='gray')
ax[3].set_title('Downscaled image (no aliasing)')
ax[0].set_xlim(0, 512)
ax[0].set_ylim(512, 0)
plt.tight_layout()
plt.show()
code
48165941/cell_12
[ "text_html_output_1.png" ]
!pip install imutils
code
48165941/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
from io import BytesIO
from PIL import Image

df_link = pd.read_csv('../input/dataset/datasets.csv')
df = df_link.copy()
df = df.dropna(axis=0, subset=['Source'])

for i in range(100):
    r = requests.get(df['Source'].iloc[i])  # .iloc avoids gaps left in the index by dropna
    print('Status:', r.status_code)
    print(r.url)
code
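BytesIO and PIL.Image are imported above but never used; a minimal sketch of the likely intent (an assumption based on those imports), decoding one downloaded image in memory:

from io import BytesIO

import requests
from PIL import Image

# Minimal sketch: decode a downloaded image without writing it to disk.
# Assumes the `df` frame from the cell above.
r = requests.get(df['Source'].iloc[0])
if r.status_code == 200:
    img = Image.open(BytesIO(r.content))
    print(img.size, img.mode)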
72103063/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
df_notpotable.isnull().sum()
df.Potability.value_counts()
df_potable.isnull().sum()
df = pd.concat([df_notpotable, df_potable])
df = df.sample(frac=1)
x = df.drop('Potability', axis=1)
y = df['Potability']
df.hist(bins=10, figsize=(20, 15), color='teal')
code
72103063/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_potable.isnull().sum()
df_potable.isnull().sum()
code
72103063/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_potable.isnull().sum()
code
72103063/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import MinMaxScaler

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
df_notpotable.isnull().sum()
df.Potability.value_counts()
df_potable.isnull().sum()
df = pd.concat([df_notpotable, df_potable])
df = df.sample(frac=1)
x = df.drop('Potability', axis=1)
y = df['Potability']
scaler = MinMaxScaler()
scaler.fit(x)
x = scaler.transform(x)
x = pd.DataFrame(x)
x
code
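For reference, MinMaxScaler maps each feature column independently onto [0, 1]:

% per-column min-max scaling applied by MinMaxScaler above
x' = \frac{x - x_{\min}}{x_{\max} - x_{\min}}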
72103063/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
print('number of rows: ', df.shape[0])
print('number of columns: ', df.shape[1])
df.Potability.value_counts()
code
72103063/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_notpotable.isnull().sum()
code
72103063/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72103063/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
df_notpotable.isnull().sum()
df.Potability.value_counts()
df_potable.isnull().sum()
df = pd.concat([df_notpotable, df_potable])
df = df.sample(frac=1)
df.head()
code
72103063/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
code
72103063/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
df_notpotable.isnull().sum()
df.Potability.value_counts()
df_potable.isnull().sum()
df = pd.concat([df_notpotable, df_potable])
df.head()
code
72103063/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.head()
code
72103063/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
df_notpotable.isnull().sum()
df.Potability.value_counts()
df_potable.isnull().sum()
df = pd.concat([df_notpotable, df_potable])
df = df.sample(frac=1)
x = df.drop('Potability', axis=1)
y = df['Potability']
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(25, 10))
p1 = fig.add_subplot(2, 2, 1)
p1.hist(df.ph[df.Potability == 0], bins=20, alpha=0.4)
p1.hist(df.ph[df.Potability == 1], bins=20, alpha=0.4)
plt.title('pH')
plt.xlabel('pH')
plt.ylabel('Count')
labels = ['0', '1']
plt.legend(labels)
p1 = fig.add_subplot(2, 2, 2)
p1.hist(df.Hardness[df.Potability == 0], bins=20, alpha=0.4)
p1.hist(df.Hardness[df.Potability == 1], bins=20, alpha=0.4)
plt.title('Hardness')
plt.xlabel('Hardness')
plt.ylabel('Count')
labels = ['0', '1']
plt.legend(labels)
p1 = fig.add_subplot(2, 2, 3)
p1.hist(df.Solids[df.Potability == 0], bins=20, alpha=0.4)
p1.hist(df.Solids[df.Potability == 1], bins=20, alpha=0.4)
plt.title('Solids')
plt.xlabel('Solids')
plt.ylabel('Count')
labels = ['0', '1']
plt.legend(labels)
p1 = fig.add_subplot(2, 2, 4)
p1.hist(df.Chloramines[df.Potability == 0], bins=20, alpha=0.4)
p1.hist(df.Chloramines[df.Potability == 1], bins=20, alpha=0.4)
plt.title('Chloramines')
plt.xlabel('Chloramines')
plt.ylabel('Count')
labels = ['0', '1']
plt.legend(labels)
plt.subplots_adjust(wspace=0.1, hspace=0.3)
plt.show()
code
72103063/cell_10
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df_notpotable = df[df['Potability'] == 0]
df_potable = df[df['Potability'] == 1]
df_notpotable.isnull().sum()
df_potable.isnull().sum()
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
# fit and transform one column at a time so each column is filled with
# its own class-conditional mean (consecutive fit() calls would otherwise
# overwrite each other before the transforms run)
df_notpotable['ph'] = impute.fit_transform(df_notpotable[['ph']])
df_notpotable['Sulfate'] = impute.fit_transform(df_notpotable[['Sulfate']])
df_notpotable['Trihalomethanes'] = impute.fit_transform(df_notpotable[['Trihalomethanes']])
df_potable['ph'] = impute.fit_transform(df_potable[['ph']])
df_potable['Sulfate'] = impute.fit_transform(df_potable[['Sulfate']])
df_potable['Trihalomethanes'] = impute.fit_transform(df_potable[['Trihalomethanes']])
code
72103063/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
df.Potability.value_counts()
df.Potability.value_counts()
code
72103063/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.isnull().sum()
code
122261656/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))
# element-wise comparison of the two rows of a candidate similar pair
print(data[44505, :] == data[46157, :])
code
122261656/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
data.head()
code
122261656/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))

def Jaccard(i, j):
    # exact Jaccard similarity of two binary rows: |intersection| / |union|
    iData = data[i, :]
    jData = data[j, :]
    count1 = 0
    count2 = 0
    for k in range(length):
        if max(iData[k], jData[k]):
            count1 += 1
        if min(iData[k], jData[k]):
            count2 += 1
    return count2 / count1

Jaccard(1, 2)
code
122261656/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
print(data)
np.random.seed(0)
n = 30
m = data.shape[0]
print(m)
length = data.shape[1]
print(length)
signMatrix = np.zeros((n, m))
code
122261656/cell_18
[ "text_plain_output_1.png" ]
from sklearn.metrics import jaccard_score
from tqdm import tqdm
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))
from sklearn.metrics import jaccard_score
jaccardDict = {}
for i in tqdm(range(1, m)):
    jaccardDict[i] = jaccard_score(data[0, :], data[i, :])
sorted(jaccardDict.items(), key=lambda x: x[1], reverse=True)[:30]
code
122261656/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))
# MinHash signatures: for each of the n random column orders, record per
# document the identity of the first column that holds a 1
for i in range(n):
    seq = np.arange(0, length)
    np.random.shuffle(seq)
    for j in range(m):
        for k in seq:
            if data[j][k] == 1:
                signMatrix[i][j] = k + 1
                break
signMatrix
code
122261656/cell_16
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import itertools
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))

def bucketAllocate(r, b):
    # LSH banding: split the signature matrix into b bands of r rows each
    # and hash every document's per-band signature into a bucket
    Bucket = {}
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)  # tag with the band index so different bands hash apart
            hashValue = hash(str(signBand))
            if hashValue not in Bucket:
                Bucket[hashValue] = [j]
            else:
                Bucket[hashValue].append(j)
    return Bucket
r = 30
b = 1
bucket = bucketAllocate(r, b)

def Jaccard(i, j):
    # exact Jaccard similarity of two binary rows: |intersection| / |union|
    iData = data[i, :]
    jData = data[j, :]
    count1 = 0
    count2 = 0
    for k in range(length):
        if max(iData[k], jData[k]):
            count1 += 1
        if min(iData[k], jData[k]):
            count2 += 1
    return count2 / count1

Jaccard(1, 2)
import itertools
from tqdm import tqdm
MaxSimilarity = 0
SimilarPair = None
# verify every candidate pair sharing a bucket with the exact Jaccard similarity
for obj in tqdm(bucket.values()):
    for p in itertools.combinations(obj, 2):
        jaccardSim = Jaccard(p[0], p[1])
        if jaccardSim > MaxSimilarity:
            MaxSimilarity = jaccardSim
            SimilarPair = p

def bucketFind(r, b):
    # collect documents that land in the same band-bucket as document 0,
    # keyed by index with their exact Jaccard similarity to it
    Bucket = {}
    hash0 = 0
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)
            hashValue = hash(str(signBand))
            if j == 0:
                hash0 = hashValue
            elif hashValue == hash0:
                Bucket[j] = Jaccard(0, j)
    return Bucket
r = 10
b = 3
bucket = bucketFind(r, b)
print(bucket)
code
122261656/cell_17
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import itertools
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))

def bucketAllocate(r, b):
    # LSH banding: split the signature matrix into b bands of r rows each
    # and hash every document's per-band signature into a bucket
    Bucket = {}
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)  # tag with the band index so different bands hash apart
            hashValue = hash(str(signBand))
            if hashValue not in Bucket:
                Bucket[hashValue] = [j]
            else:
                Bucket[hashValue].append(j)
    return Bucket
r = 30
b = 1
bucket = bucketAllocate(r, b)

def Jaccard(i, j):
    # exact Jaccard similarity of two binary rows: |intersection| / |union|
    iData = data[i, :]
    jData = data[j, :]
    count1 = 0
    count2 = 0
    for k in range(length):
        if max(iData[k], jData[k]):
            count1 += 1
        if min(iData[k], jData[k]):
            count2 += 1
    return count2 / count1

Jaccard(1, 2)
import itertools
from tqdm import tqdm
MaxSimilarity = 0
SimilarPair = None
# verify every candidate pair sharing a bucket with the exact Jaccard similarity
for obj in tqdm(bucket.values()):
    for p in itertools.combinations(obj, 2):
        jaccardSim = Jaccard(p[0], p[1])
        if jaccardSim > MaxSimilarity:
            MaxSimilarity = jaccardSim
            SimilarPair = p

def bucketFind(r, b):
    # collect documents that land in the same band-bucket as document 0,
    # keyed by index with their exact Jaccard similarity to it
    Bucket = {}
    hash0 = 0
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)
            hashValue = hash(str(signBand))
            if j == 0:
                hash0 = hashValue
            elif hashValue == hash0:
                Bucket[j] = Jaccard(0, j)
    return Bucket
r = 10
b = 3
bucket = bucketFind(r, b)
sorted(bucket.items(), key=lambda x: x[1], reverse=True)[:30]
code
122261656/cell_10
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))

def bucketAllocate(r, b):
    # LSH banding: split the signature matrix into b bands of r rows each
    # and hash every document's per-band signature into a bucket
    Bucket = {}
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)  # tag with the band index so different bands hash apart
            hashValue = hash(str(signBand))
            if hashValue not in Bucket:
                Bucket[hashValue] = [j]
            else:
                Bucket[hashValue].append(j)
    return Bucket
r = 30
b = 1
bucket = bucketAllocate(r, b)
print(len(bucket))
code
122261656/cell_12
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import itertools
import numpy as np
import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/docs-for-lsh/docs_for_lsh.csv')
import numpy as np
data = data.values[:, 1:]
np.random.seed(0)
n = 30
m = data.shape[0]
length = data.shape[1]
signMatrix = np.zeros((n, m))

def bucketAllocate(r, b):
    # LSH banding: split the signature matrix into b bands of r rows each
    # and hash every document's per-band signature into a bucket
    Bucket = {}
    for i in range(b):
        tmp = signMatrix[i * r:(i + 1) * r]  # rows belonging to band i
        for j in range(m):
            signBand = list(tmp[:, j])
            signBand.append(i)  # tag with the band index so different bands hash apart
            hashValue = hash(str(signBand))
            if hashValue not in Bucket:
                Bucket[hashValue] = [j]
            else:
                Bucket[hashValue].append(j)
    return Bucket
r = 30
b = 1
bucket = bucketAllocate(r, b)

def Jaccard(i, j):
    # exact Jaccard similarity of two binary rows: |intersection| / |union|
    iData = data[i, :]
    jData = data[j, :]
    count1 = 0
    count2 = 0
    for k in range(length):
        if max(iData[k], jData[k]):
            count1 += 1
        if min(iData[k], jData[k]):
            count2 += 1
    return count2 / count1

Jaccard(1, 2)
import itertools
from tqdm import tqdm
MaxSimilarity = 0
SimilarPair = None
# verify every candidate pair sharing a bucket with the exact Jaccard similarity
for obj in tqdm(bucket.values()):
    for p in itertools.combinations(obj, 2):
        jaccardSim = Jaccard(p[0], p[1])
        if jaccardSim > MaxSimilarity:
            MaxSimilarity = jaccardSim
            SimilarPair = p
print(SimilarPair)
print(MaxSimilarity)
code
88094115/cell_13
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
model.coef_
code
88094115/cell_4
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
df.head()
code
88094115/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
code
88094115/cell_11
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
code
88094115/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88094115/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
y = df.iloc[:, 1].values
y
code
88094115/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
plt.scatter(x, y)
plt.show()
code
88094115/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
model.coef_
model.intercept_
model.predict([[4]])
code
88094115/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
r2_score(y, y_pred) * 100
code
88094115/cell_14
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
y_pred
model.coef_
model.intercept_
code
88094115/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/years-of-experience-and-salary-dataset/Salary_Data.csv')
x = df[['YearsExperience']]
x
y = df.iloc[:, 1].values
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
code