path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
74067865/cell_7 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
healthsysdf = pd.read_csv('../input/world-bank-wdi-212-health-systems/2.12_Health_systems.csv')
healthsysdf = healthsysdf.drop(columns='Province_State')
healthsysdf = healthsysdf.drop(columns='Country_Region')
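# Express government, out-of-pocket, and remaining health spending as percentages of GDP.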
healthsysdf['Total_Gov_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_public_pct_2016, axis=1)
healthsysdf['Outofpocket_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_out_of_pocket_pct_2016, axis=1)
healthsysdf['Other_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 - row.Total_Gov_Spend - row.Outofpocket_Spend, axis=1)
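# ISO 3166-1 alpha-3 codes, assumed to follow the row order of the health-systems table; empty strings mark entries without a matching code.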
countrycodes = ['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'ATG', 'ARG', 'ARM', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR', 'BEL', 'BLZ', 'BEN', 'BTN', 'BOL', 'BIH', 'BWA', 'BRA', 'BRN', 'BGR', 'BFA', 'BDI', 'CPV', 'KHM', 'CMR', 'CAN', '', 'CAF', 'TCD', '', 'CHL', 'CHN', '', '', 'COL', 'COM', 'COD', 'COG', 'CRI', 'CIV', 'HRV', 'CUB', 'CYP', 'CZE', 'DNK', 'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST', 'SWZ', 'ETH', '', 'FJI', 'FIN', 'FRA', '', 'GAB', 'GMB', 'GEO', 'DEU', 'GHA', 'GRC', '', 'GRD', '', 'GTM', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HUN', 'ISL', 'IND', 'IDN', 'IRN', 'IRQ', 'IRL', '', 'ISR', 'ITA', 'JAM', 'JPN', 'JOR', 'KAZ', 'KEN', 'KIR', '', 'KOR', '', 'KWT', 'KGZ', 'LAO', 'LVA', 'LBN', 'LSO', 'LBR', '', '', 'LTU', 'LUX', 'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MHL', 'MRT', 'MUS', 'MEX', 'FSM', 'MDA', 'MCO', 'MNG', 'MNE', 'MAR', 'MOZ', 'MMR', 'NAM', 'NPL', 'NLD', '', 'NZL', 'NGA', 'NER', 'NGA', 'MKD', '', 'NOR', 'OMN', 'PAK', 'PLW', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL', 'PRT', '', 'QAT', 'ROU', 'RUS', 'RWA', 'WSM', 'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', '', 'SVK', 'SVN', 'SLB', '', 'ZAF', '', 'ESP', 'LKA', 'KNA', 'LCA', '', 'VCT', 'SDN', 'SUR', 'SWE', 'CHE', '', 'TJK', 'TZA', 'THA', 'TLS', 'TGO', 'TON', 'TTO', 'TUN', 'TUR', 'TKM', '', 'TUV', 'UGA', 'UKR', 'ARE', 'GBR', 'USA', 'URY', 'UZB', 'VUT', 'VEN', 'VNM', '', '', 'YEM', 'ZMB', 'ZWE']
healthsysdf['Country_Codes'] = countrycodes
bginfo = pd.read_csv('../input/undata-country-profiles/country_profile_variables.csv')
bginfo.rename(columns={'country': 'World_Bank_Name'}, inplace=True)
bginfo = bginfo.replace({'United States of America': 'United States', 'Viet Nam': 'Vietnam'})
healthsysdf = healthsysdf.replace({'Yemen, Rep.': 'Yemen'})
healthsysdf = pd.merge(healthsysdf, bginfo, on='World_Bank_Name', how='outer')
healthsysdf = healthsysdf.dropna(thresh=3)
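# Negative GDP values look like missing-data sentinels in the UN profiles, so drop those rows.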
badgdp = healthsysdf[healthsysdf['GDP: Gross domestic product (million current US$)'] < 0].index
healthsysdf.drop(badgdp, inplace=True)
healthsysdf.replace({'SouthernAsia': 'Asia', 'WesternAsia': 'Asia', 'EasternAsia': 'Asia', 'CentralAsia': 'Asia', 'South-easternAsia': 'Asia', 'WesternEurope': 'Europe', 'SouthernEurope': 'Europe', 'EasternEurope': 'Europe', 'NorthernEurope': 'Europe', 'NorthernAfrica': 'Africa', 'MiddleAfrica': 'Africa', 'WesternAfrica': 'Africa', 'EasternAfrica': 'Africa', 'SouthernAfrica': 'Africa', 'SouthAmerica': 'Americas', 'Caribbean': 'Americas', 'CentralAmerica': 'Americas', 'NorthernAmerica': 'Americas', 'Polynesia': 'Oceania', 'Melanesia': 'Oceania', 'Micronesia': 'Oceania'}, inplace=True)
total_exp = healthsysdf.sort_values('Health_exp_pct_GDP_2016', ascending=False)
top_ten_exp = total_exp.head(10)
total_exp = total_exp.sort_values('Health_exp_pct_GDP_2016')
low_ten_exp = total_exp.head(10)
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(go.Bar(x=top_ten_exp['World_Bank_Name'], y=top_ten_exp['Health_exp_pct_GDP_2016']), row=1, col=1)
fig.add_trace(go.Bar(x=low_ten_exp['World_Bank_Name'], y=low_ten_exp['Health_exp_pct_GDP_2016']), row=1, col=2)
fig.update_layout(title={'text': 'Ten highest and lowest spenders', 'y': 0.9, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}, plot_bgcolor='white', paper_bgcolor='white', yaxis_title='% of GDP spent on healthcare', showlegend=False, font=dict(family='Courier New, monospace', size=14, color='#7f7f7f'))
fig.show() | code |
74067865/cell_15 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
healthsysdf = pd.read_csv('../input/world-bank-wdi-212-health-systems/2.12_Health_systems.csv')
healthsysdf = healthsysdf.drop(columns='Province_State')
healthsysdf = healthsysdf.drop(columns='Country_Region')
healthsysdf['Total_Gov_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_public_pct_2016, axis=1)
healthsysdf['Outofpocket_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 / 100 * row.Health_exp_out_of_pocket_pct_2016, axis=1)
healthsysdf['Other_Spend'] = healthsysdf.apply(lambda row: row.Health_exp_pct_GDP_2016 - row.Total_Gov_Spend - row.Outofpocket_Spend, axis=1)
countrycodes = ['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'ATG', 'ARG', 'ARM', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR', 'BEL', 'BLZ', 'BEN', 'BTN', 'BOL', 'BIH', 'BWA', 'BRA', 'BRN', 'BGR', 'BFA', 'BDI', 'CPV', 'KHM', 'CMR', 'CAN', '', 'CAF', 'TCD', '', 'CHL', 'CHN', '', '', 'COL', 'COM', 'COD', 'COG', 'CRI', 'CIV', 'HRV', 'CUB', 'CYP', 'CZE', 'DNK', 'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST', 'SWZ', 'ETH', '', 'FJI', 'FIN', 'FRA', '', 'GAB', 'GMB', 'GEO', 'DEU', 'GHA', 'GRC', '', 'GRD', '', 'GTM', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HUN', 'ISL', 'IND', 'IDN', 'IRN', 'IRQ', 'IRL', '', 'ISR', 'ITA', 'JAM', 'JPN', 'JOR', 'KAZ', 'KEN', 'KIR', '', 'KOR', '', 'KWT', 'KGZ', 'LAO', 'LVA', 'LBN', 'LSO', 'LBR', '', '', 'LTU', 'LUX', 'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MHL', 'MRT', 'MUS', 'MEX', 'FSM', 'MDA', 'MCO', 'MNG', 'MNE', 'MAR', 'MOZ', 'MMR', 'NAM', 'NPL', 'NLD', '', 'NZL', 'NGA', 'NER', 'NGA', 'MKD', '', 'NOR', 'OMN', 'PAK', 'PLW', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL', 'PRT', '', 'QAT', 'ROU', 'RUS', 'RWA', 'WSM', 'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', '', 'SVK', 'SVN', 'SLB', '', 'ZAF', '', 'ESP', 'LKA', 'KNA', 'LCA', '', 'VCT', 'SDN', 'SUR', 'SWE', 'CHE', '', 'TJK', 'TZA', 'THA', 'TLS', 'TGO', 'TON', 'TTO', 'TUN', 'TUR', 'TKM', '', 'TUV', 'UGA', 'UKR', 'ARE', 'GBR', 'USA', 'URY', 'UZB', 'VUT', 'VEN', 'VNM', '', '', 'YEM', 'ZMB', 'ZWE']
healthsysdf['Country_Codes'] = countrycodes
bginfo = pd.read_csv('../input/undata-country-profiles/country_profile_variables.csv')
bginfo.rename(columns={'country': 'World_Bank_Name'}, inplace=True)
bginfo = bginfo.replace({'United States of America': 'United States', 'Viet Nam': 'Vietnam'})
healthsysdf = healthsysdf.replace({'Yemen, Rep.': 'Yemen'})
healthsysdf = pd.merge(healthsysdf, bginfo, on='World_Bank_Name', how='outer')
healthsysdf = healthsysdf.dropna(thresh=3)
badgdp = healthsysdf[healthsysdf['GDP: Gross domestic product (million current US$)'] < 0].index
healthsysdf.drop(badgdp, inplace=True)
healthsysdf.replace({'SouthernAsia': 'Asia', 'WesternAsia': 'Asia', 'EasternAsia': 'Asia', 'CentralAsia': 'Asia', 'South-easternAsia': 'Asia', 'WesternEurope': 'Europe', 'SouthernEurope': 'Europe', 'EasternEurope': 'Europe', 'NorthernEurope': 'Europe', 'NorthernAfrica': 'Africa', 'MiddleAfrica': 'Africa', 'WesternAfrica': 'Africa', 'EasternAfrica': 'Africa', 'SouthernAfrica': 'Africa', 'SouthAmerica': 'Americas', 'Caribbean': 'Americas', 'CentralAmerica': 'Americas', 'NorthernAmerica': 'Americas', 'Polynesia': 'Oceania', 'Melanesia': 'Oceania', 'Micronesia': 'Oceania'}, inplace=True)
total_exp = healthsysdf.sort_values('Health_exp_pct_GDP_2016', ascending=False)
top_ten_exp = total_exp.head(10)
total_exp = total_exp.sort_values('Health_exp_pct_GDP_2016')
low_ten_exp = total_exp.head(10)
fig = make_subplots(rows=1, cols=2, shared_yaxes=True)
fig.add_trace(
go.Bar(x=top_ten_exp['World_Bank_Name'], y=top_ten_exp['Health_exp_pct_GDP_2016']),
row=1, col=1
)
fig.add_trace(
go.Bar(x=low_ten_exp['World_Bank_Name'], y=low_ten_exp['Health_exp_pct_GDP_2016']),
row=1, col=2
)
fig.update_layout(
title={
'text': "Ten highest and lowest spenders",
'y':0.9,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
plot_bgcolor= 'white',
paper_bgcolor= 'white',
yaxis_title="% of GDP spent on healthcare",
showlegend=False,
font=dict(
family="Courier New, monospace",
size=14,
color="#7f7f7f"
)
)
fig.show()
import plotly.graph_objects as go
import pandas as pd
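# Choropleth maps keyed by ISO alpha-3 code: total health spending (% of GDP), government spending, and per-capita spending (PPP).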
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Health_exp_pct_GDP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Percentage of GDP spent on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['Total_Gov_Spend'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, colorbar_tickprefix='% ', marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Government Spending on Healthcare', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
fig = go.Figure(data=go.Choropleth(locations=healthsysdf['Country_Codes'], z=healthsysdf['per_capita_exp_PPP_2016'], text=healthsysdf['World_Bank_Name'], colorscale='blues', autocolorscale=False, marker_line_color='darkgray', marker_line_width=0.5))
fig.update_layout(title_text='Healthcare Spending per Capita', font=dict(family='Courier New, monospace', size=14), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'))
g8_list = ['Canada', 'United Kingdom', 'United States', 'Russian Federation', 'Germany', 'France', 'Japan', 'China']
g8_sub = healthsysdf.loc[healthsysdf['World_Bank_Name'].isin(g8_list)]
g8_sub = g8_sub.sort_values('Health_exp_pct_GDP_2016', ascending=False)
fig = go.Figure()
fig.add_trace(go.Bar(x=g8_sub['World_Bank_Name'], y=g8_sub['Health_exp_pct_GDP_2016'], name='Total Spending', marker_color='darkblue'))
fig.add_trace(go.Bar(x=g8_sub['World_Bank_Name'], y=g8_sub['Total_Gov_Spend'], name='Government Spending', marker_color='mediumaquamarine'))
fig.add_trace(go.Bar(x=g8_sub['World_Bank_Name'], y=g8_sub['Outofpocket_Spend'], name='Private (out of pocket) Spending', marker_color='lightsteelblue'))
fig.add_trace(go.Bar(x=g8_sub['World_Bank_Name'], y=g8_sub['Other_Spend'], name='Other', marker_color='grey'))
fig.update_layout(barmode='group', title={'text': 'G8 Healthcare spending', 'y': 0.9, 'x': 0.4, 'xanchor': 'center', 'yanchor': 'top'}, plot_bgcolor='white', paper_bgcolor='white', yaxis_title='% of GDP spent on healthcare', showlegend=True, font=dict(family='Courier New, monospace', size=14, color='#7f7f7f'))
fig.show() | code |
89136628/cell_21 | [
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
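# Query the IEX sandbox batch endpoint for each ticker and append the latest price and one-year price return.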
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
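# Build the high-quality momentum (HQM) table: price, market cap, and one-year/six-/three-/one-month returns; percentile columns start as 'N/A' placeholders.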
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['marketCap'], 'N/A', data[symbol]['stats']['year1ChangePercent'], 'N/A', data[symbol]['stats']['month6ChangePercent'], 'N/A', data[symbol]['stats']['month3ChangePercent'], 'N/A', data[symbol]['stats']['month1ChangePercent'], 'N/A', 'N/A'], index=hqm_columns), ignore_index=True)
hqm_dataframe.columns
hqm_dataframe.head() | code |
89136628/cell_9 | [
"text_html_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import requests
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data | code |
89136628/cell_25 | [
"text_html_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['marketCap'], 'N/A', data[symbol]['stats']['year1ChangePercent'], 'N/A', data[symbol]['stats']['month6ChangePercent'], 'N/A', data[symbol]['stats']['month3ChangePercent'], 'N/A', data[symbol]['stats']['month1ChangePercent'], 'N/A', 'N/A'], index=hqm_columns), ignore_index=True)
hqm_dataframe.columns
hqm_dataframe.sort_values('One-Year Price Return', ascending=False, inplace=True)
hqm_dataframe = hqm_dataframe[:51]
hqm_dataframe.reset_index(drop=True, inplace=True)
len(hqm_dataframe)
hqm_dataframe[hqm_dataframe.isnull().any(axis=1)]
hqm_dataframe.dropna(axis=0, inplace=True)
len(hqm_dataframe[hqm_dataframe.isnull().any(axis=1)].index) | code |
89136628/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
momentum_srategy.head() | code |
89136628/cell_23 | [
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['marketCap'], 'N/A', data[symbol]['stats']['year1ChangePercent'], 'N/A', data[symbol]['stats']['month6ChangePercent'], 'N/A', data[symbol]['stats']['month3ChangePercent'], 'N/A', data[symbol]['stats']['month1ChangePercent'], 'N/A', 'N/A'], index=hqm_columns), ignore_index=True)
hqm_dataframe.columns
hqm_dataframe.sort_values('One-Year Price Return', ascending=False, inplace=True)
hqm_dataframe = hqm_dataframe[:51]
hqm_dataframe.reset_index(drop=True, inplace=True)
len(hqm_dataframe) | code |
89136628/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
recommended_trades.head() | code |
89136628/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89136628/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
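# Split the ticker list into chunks of 100 and join each chunk into a comma-separated string for batch API calls.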
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
symbol_groups = list(chunks(trades['Ticker'], 100))
symbol_strings = []
for i in range(0, len(symbol_groups)):
symbol_strings.append(','.join(symbol_groups[i]))
symbol_strings | code |
89136628/cell_19 | [
"text_html_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['marketCap'], 'N/A', data[symbol]['stats']['year1ChangePercent'], 'N/A', data[symbol]['stats']['month6ChangePercent'], 'N/A', data[symbol]['stats']['month3ChangePercent'], 'N/A', data[symbol]['stats']['month1ChangePercent'], 'N/A', 'N/A'], index=hqm_columns), ignore_index=True)
hqm_dataframe.columns | code |
89136628/cell_7 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
trades.head() | code |
89136628/cell_17 | [
"text_html_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
len(hqm_dataframe) | code |
89136628/cell_14 | [
"text_html_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data | code |
89136628/cell_27 | [
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
from scipy import stats
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
import requests
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
IEX_CLOUD_API_TOKEN = 'Tpk_ddf77a77f6e7464390bb2adc85a2be11'
secret_value_0 = user_secrets.get_secret('IEX_CLOUD_API_TOKEN')
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
data
trades[trades['Ticker'] == 'VIAC']
trades.drop(index=trades[trades['Ticker'] == 'VIAC'].index, inplace=True)
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
trades_data = pd.DataFrame(columns=my_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/v1/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
trades_data = trades_data.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['stats']['year1ChangePercent'], 'N/A'], index=my_columns), ignore_index=True)
trades_data
hqm_columns = ['Ticker', 'Price', 'Market Capitalization', 'Number of Shares to Buy', 'One-Year Price Return', 'One-Year Return Percentile', 'Six-Month Price Return', 'Six-Month Return Percentile', 'Three-Month Price Return', 'Three-Month Return Percentile', 'One-Month Price Return', 'One-Month Return Percentile', 'HQM Score']
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
for symbol_string in trades['Ticker']:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(pd.Series([symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['marketCap'], 'N/A', data[symbol]['stats']['year1ChangePercent'], 'N/A', data[symbol]['stats']['month6ChangePercent'], 'N/A', data[symbol]['stats']['month3ChangePercent'], 'N/A', data[symbol]['stats']['month1ChangePercent'], 'N/A', 'N/A'], index=hqm_columns), ignore_index=True)
hqm_dataframe.columns
hqm_dataframe.sort_values('One-Year Price Return', ascending=False, inplace=True)
hqm_dataframe = hqm_dataframe[:51]
hqm_dataframe.reset_index(drop=True, inplace=True)
len(hqm_dataframe)
hqm_dataframe[hqm_dataframe.isnull().any(axis=1)]
hqm_dataframe.dropna(axis=0, inplace=True)
len(hqm_dataframe[hqm_dataframe.isnull().any(axis=1)].index)
from scipy import stats
time_periods = ['One-Year', 'Six-Month', 'Three-Month', 'One-Month']
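# Turn each period's raw price return into a percentile score (0-1) across the remaining tickers.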
for row in hqm_dataframe.index:
for time_period in time_periods:
hqm_dataframe.loc[row, f'{time_period} Return Percentile'] = stats.percentileofscore(hqm_dataframe[f'{time_period} Price Return'], hqm_dataframe.loc[row, f'{time_period} Price Return']) / 100
for time_period in time_periods:
print(hqm_dataframe[f'{time_period} Return Percentile'] * 100)
hqm_dataframe | code |
89136628/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
value_strategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/value_strategy_1.csv.csv')
recommended_trades = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/recommended_trades_1.csv.csv')
momentum_srategy = pd.read_csv('/kaggle/input/algorithmic-trading-dataset/momentum_strategy_1.csv.csv')
trades = pd.read_csv('/kaggle/input/sp-500-stocks/sp_500_stocks.csv')
value_strategy.head() | code |
50244989/cell_9 | [
"text_plain_output_1.png"
] | from autoviml.Auto_NLP import Auto_NLP
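# Note: input_feature, train, test, and target are presumably defined in earlier notebook cells not captured in this snippet.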
train_x, test_x, final, predicted = Auto_NLP(input_feature, train, test, target, score_type='balanced_accuracy', top_num_features=100, modeltype='Classification', verbose=2, build_model=True) | code |
50244989/cell_6 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | test.head() | code |
50244989/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | !pip install autoviml | code |
50244989/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/train_E6oV3lV.csv')
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
from sklearn.model_selection import train_test_split
from autoviml.Auto_NLP import Auto_NLP
train, test = train_test_split(df, test_size=0.2) | code |
50244989/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/train_E6oV3lV.csv')
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
final.predict(test_x[input_feature])
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
final.predict(testing[input_feature])
sample = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/sample_submission_gfvA5FD.csv')
sample['label'] = final.predict(testing[input_feature]) | code |
50244989/cell_10 | [
"text_plain_output_1.png"
] | final.predict(test_x[input_feature]) | code |
50244989/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/train_E6oV3lV.csv')
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
final.predict(test_x[input_feature])
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
final.predict(testing[input_feature]) | code |
50244989/cell_5 | [
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/train_E6oV3lV.csv')
testing = pd.read_csv('../input/twitter-sentiment-analysis-analytics-vidya/test_tweets_anuFYb8.csv')
df.head() | code |
16168103/cell_4 | [
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls() | code |
16168103/cell_20 | [
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
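# Folder-labelled databunch: 10% validation split, horizontal/vertical flips, 128px images, batch size 16, ImageNet normalisation.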
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001)) | code |
16168103/cell_6 | [
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
print(f'Classes: \n {data.classes}')
data.show_batch(rows=8, figsize=(10, 10)) | code |
16168103/cell_26 | [
"text_html_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2)
learn.lr_find()
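# Fine-tune the unfrozen model with discriminative learning rates (1e-6 for early layers up to 1e-4 for the head).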
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
inter = ClassificationInterpretation.from_learner(learn)
inter.plot_confusion_matrix(figsize=(10, 10)) | code |
16168103/cell_18 | [
"text_html_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2)
learn.lr_find()
learn.recorder.plot() | code |
16168103/cell_8 | [
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.recorder.plot() | code |
16168103/cell_16 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2) | code |
16168103/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
inter = ClassificationInterpretation.from_learner(learn)
inter.plot_top_losses(10, figsize=(20, 20)) | code |
16168103/cell_22 | [
"text_html_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.save('overwatch-stage-1')
learn.unfreeze()
learn.fit_one_cycle(2)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
learn.recorder.plot_losses() | code |
16168103/cell_10 | [
"text_plain_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5) | code |
16168103/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | path = Path('../input/heroes/heroes/')
path.ls()
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.1, ds_tfms=get_transforms(max_warp=0, flip_vert=True, do_flip=True), size=128, bs=16).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learn.lr_find()
learn.fit_one_cycle(5)
learn.recorder.plot_losses() | code |
334111/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.cross_validation import train_test_split
test.shape
train = train[train['Semana'] > 8]
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
del train
print('Division_Set_Shapes:', X.shape, y.shape)
print('Validation_Set_Shapes:', X_train.shape, X_test.shape)
del X
del y | code |
334111/cell_6 | [
"text_plain_output_1.png"
] | test.shape | code |
334111/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import xgboost as xgb
import pandas as pd
import math
import os
import sys
from sklearn.cross_validation import train_test_split
from ml_metrics import rmsle | code |
334111/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | num_rounds = 50
del xg_train | code |
334111/cell_7 | [
"text_plain_output_1.png"
] | dtype = {'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint16}
filename = '../input/train.csv'
train.head() | code |
334111/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | train = train[train['Semana'] > 8]
print('Training_Shape:', train.shape) | code |
334111/cell_15 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from ml_metrics import rmsle
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import xgboost as xgb
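# Custom RMSLE metric for xgboost's watchlist; negative predictions are clamped to 0 before the log.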
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)]
return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5)
test.shape
train = train[train['Semana'] > 8]
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
del train
del X
del y
params = {}
params['objective'] = 'reg:linear'
params['eta'] = 0.1
params['max_depth'] = 5
params['subsample'] = 0.8
params['colsample_bytree'] = 0.6
params['silent'] = True
params['booster'] = 'gbtree'
test_preds = np.zeros(test.shape[0])
xg_train = xgb.DMatrix(X_train, label=y_train)
del X_train
del y_train
xg_test = xgb.DMatrix(X_test)
del X_test
watchlist = [(xg_train, 'train')]
preds = xgclassifier.predict(xg_test, ntree_limit=xgclassifier.best_iteration)
print('RMSLE Score:', rmsle(y_test, preds))
del preds
del y_test | code |
334111/cell_14 | [
"text_plain_output_1.png"
] | from ml_metrics import rmsle
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import xgboost as xgb
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)]
return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5)
test.shape
train = train[train['Semana'] > 8]
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
del train
del X
del y
params = {}
params['objective'] = 'reg:linear'
params['eta'] = 0.1
params['max_depth'] = 5
params['subsample'] = 0.8
params['colsample_bytree'] = 0.6
params['silent'] = True
params['booster'] = 'gbtree'
test_preds = np.zeros(test.shape[0])
xg_train = xgb.DMatrix(X_train, label=y_train)
del X_train
del y_train
xg_test = xgb.DMatrix(X_test)
del X_test
watchlist = [(xg_train, 'train')]
preds = xgclassifier.predict(xg_test, ntree_limit=xgclassifier.best_iteration)
xgb.plot_importance(xgclassifier) | code |
334111/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from ml_metrics import rmsle
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import xgboost as xgb
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)]
return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5)
test.shape
train = train[train['Semana'] > 8]
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
del train
del X
del y
params = {}
params['objective'] = 'reg:linear'
params['eta'] = 0.1
params['max_depth'] = 5
params['subsample'] = 0.8
params['colsample_bytree'] = 0.6
params['silent'] = True
params['booster'] = 'gbtree'
test_preds = np.zeros(test.shape[0])
xg_train = xgb.DMatrix(X_train, label=y_train)
del X_train
del y_train
xg_test = xgb.DMatrix(X_test)
del X_test
watchlist = [(xg_train, 'train')]
preds = xgclassifier.predict(xg_test, ntree_limit=xgclassifier.best_iteration)
print('RMSLE Score:', rmsle(y_test, preds)) | code |
334111/cell_5 | [
"text_plain_output_1.png"
] | print('Loading Test...')
dtype_test = {'id': np.uint32, 'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16}
test.head() | code |
17144046/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
movie_df = pd.read_csv('../input/u.item', sep='|', encoding='latin-1', header=None)
movie_df = movie_df[[0, 1]]
movie_df.columns = ['movie_id', 'movie_name']
movie_df
mean_ratings = ratings_df.groupby('movie_id').agg({'rating': 'mean'}).reset_index().rename({'rating': 'mean_rating'}, axis=1)
count_ratings = ratings_df.groupby('movie_id').agg({'rating': 'count'}).reset_index().rename({'rating': 'count_rating'}, axis=1)
mean_ratings
base_model_df = movie_df.merge(mean_ratings).merge(count_ratings)
base_model_df
sns.scatterplot(x='count_rating', y='mean_rating', data=base_model_df) | code |
17144046/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
movie_df = pd.read_csv('../input/u.item', sep='|', encoding='latin-1', header=None)
movie_df = movie_df[[0, 1]]
movie_df.columns = ['movie_id', 'movie_name']
movie_df | code |
17144046/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
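# Per-movie mean rating and rating count.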
mean_ratings = ratings_df.groupby('movie_id').agg({'rating': 'mean'}).reset_index().rename({'rating': 'mean_rating'}, axis=1)
count_ratings = ratings_df.groupby('movie_id').agg({'rating': 'count'}).reset_index().rename({'rating': 'count_rating'}, axis=1)
mean_ratings | code |
17144046/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df | code |
17144046/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
17144046/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
movie_df = pd.read_csv('../input/u.item', sep='|', encoding='latin-1', header=None)
movie_df = movie_df[[0, 1]]
movie_df.columns = ['movie_id', 'movie_name']
movie_df
mean_ratings = ratings_df.groupby('movie_id').agg({'rating': 'mean'}).reset_index().rename({'rating': 'mean_rating'}, axis=1)
count_ratings = ratings_df.groupby('movie_id').agg({'rating': 'count'}).reset_index().rename({'rating': 'count_rating'}, axis=1)
mean_ratings
base_model_df = movie_df.merge(mean_ratings).merge(count_ratings)
base_model_df | code |
17144046/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
ratings_df['rating'].max() | code |
17144046/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
movie_df = pd.read_csv('../input/u.item', sep='|', encoding='latin-1', header=None)
movie_df = movie_df[[0, 1]]
movie_df.columns = ['movie_id', 'movie_name']
movie_df
mean_ratings = ratings_df.groupby('movie_id').agg({'rating': 'mean'}).reset_index().rename({'rating': 'mean_rating'}, axis=1)
count_ratings = ratings_df.groupby('movie_id').agg({'rating': 'count'}).reset_index().rename({'rating': 'count_rating'}, axis=1)
mean_ratings
base_model_df = movie_df.merge(mean_ratings).merge(count_ratings)
base_model_df
sns.lmplot(x='count_rating', y='mean_rating', data=base_model_df) | code |
17144046/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ratings_df = pd.read_csv('../input/u.data', sep='\t', names=['user_id', 'movie_id', 'rating', 'ts'])
ratings_df
movie_df = pd.read_csv('../input/u.item', sep='|', encoding='latin-1', header=None)
movie_df = movie_df[[0, 1]]
movie_df.columns = ['movie_id', 'movie_name']
movie_df
user_df = pd.read_csv('../input/u.user', sep='|', encoding='latin-1', header=None)
user_df = user_df[[0, 1]]
user_df.columns = ['user_id', 'age']
user_df | code |
89139708/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
print(torch.min(orig_image))
print(torch.max(orig_image)) | code |
89139708/cell_9 | [
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_1.png",
"text_html_output_8.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | from pytorch_lightning.loggers import WandbLogger
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger) | code |
89139708/cell_25 | [
"image_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.trainer import Trainer
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger)
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
data_mod = CelebADataModule(csv_path=csv_path, image_dir=img_path, train_transforms=image_transforms['train'], val_transforms=image_transforms['val'], batch_size=32, num_workers=3)
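# Keep the checkpoint with the lowest epoch-level validation loss (logged as 'val_loss_epoch' in validation_epoch_end)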
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch')
trainer = Trainer(gpus=1, max_epochs=6, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer.fit(model, data_mod) | code |
89139708/cell_34 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | from IPython.core.display import display, HTML
from PIL import Image
from io import BytesIO
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.trainer import Trainer
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import base64
import matplotlib.pyplot as plt
import os
import pandas as pd
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger)
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
def show_image(image_tensor):
image_tensor = torch.permute(image_tensor.cpu(), (1, 2, 0))
plt.axis('off')
plt.imshow(image_tensor)
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
transformed_image = Image.open(single_image_path).convert('RGB')
transformed_image = image_transforms['train'](transformed_image)
class CelebADataModule(pl.LightningDataModule):
def __init__(self, csv_path: str, image_dir: str, train_transforms, val_transforms, batch_size=16, **dataloader_kwargs):
super(CelebADataModule, self).__init__()
dataloader_kwargs.setdefault('num_workers', 2)
self.csv_path = csv_path
self.image_dir = image_dir
self.transforms = {'train': train_transforms, 'val': val_transforms}
self.bs = batch_size
self.dataloader_kwargs = dataloader_kwargs
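# CelebA encodes attributes as -1/1; map the 'Male' attribute to class indices 0 (Female) and 1 (Male)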
self._label_to_node_idx = {-1: 0, 1: 1}
self.image_labels = None
self.train_dataset = None
self.val_dataset = None
self.test_dataset = None
def setup(self, stage=None):
attributes = pd.read_csv(self.csv_path)
self.image_labels = []
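# Build (image_filename, class_index) pairs from the attribute CSV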
for i in range(len(attributes)):
ith_sample = attributes.iloc[i]
image_name = ith_sample[0]
label = ith_sample['Male']
label = self._label_to_node_idx[label]
self.image_labels.append((image_name, label))
total_d = len(self.image_labels)
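# Hold out the last 5,000 samples: 2,500 for test and 2,500 for validation; the rest is used for training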
train_data = self.image_labels[:total_d - 5000]
test_data = self.image_labels[total_d - 5000:total_d - 2500]
val_data = self.image_labels[total_d - 2500:]
self.train_dataset = CelebADataset(train_data, self.image_dir, self.transforms['train'])
self.test_dataset = CelebADataset(test_data, self.image_dir, self.transforms['val'])
self.val_dataset = CelebADataset(val_data, self.image_dir, self.transforms['val'])
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.bs, shuffle=True, **self.dataloader_kwargs)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.bs, **self.dataloader_kwargs)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.bs, **self.dataloader_kwargs)
data_mod = CelebADataModule(csv_path=csv_path, image_dir=img_path, train_transforms=image_transforms['train'], val_transforms=image_transforms['val'], batch_size=32, num_workers=3)
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch')
trainer = Trainer(gpus=1, max_epochs=6, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer.fit(model, data_mod)
trainer.validate(model, data_mod)
best_model_path = trainer.checkpoint_callback.best_model_path
model = LitModel.load_from_checkpoint(best_model_path, n_classes=2)
from io import BytesIO
import base64
gender_target = {0: 'Female', 1: 'Male'}
def img_to_display(filename):
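# Thumbnail the image and return it as a base64-encoded JPEG string for inline HTML embedding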
i = Image.open(filename)
i.thumbnail((200, 200), Image.LANCZOS)
with BytesIO() as buffer:
i.save(buffer, 'jpeg')
return base64.b64encode(buffer.getvalue()).decode()
def display_result(filename, prediction, target):
"""
Display the results in HTML
"""
gender = 'Male'
gender_icon = 'https://i.imgur.com/nxWan2u.png'
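# prediction[1] is the softmax probability of the 'Male' class; at or below 0.5, fall back to the female icon and label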
if prediction[1] <= 0.5:
gender_icon = 'https://i.imgur.com/oAAb8rd.png'
gender = 'Female'
display_html = '\n <div style="overflow: auto; border: 2px solid #D8D8D8;\n padding: 5px; width: 420px;" >\n <img src="data:image/jpeg;base64,{}" style="float: left;" width="200" height="200">\n <div style="padding: 10px 0px 0px 20px; overflow: auto;">\n <img src="{}" style="float: left;" width="40" height="40">\n <h3 style="margin-left: 50px; margin-top: 2px;">{}</h3>\n <p style="margin-left: 50px; margin-top: -6px; font-size: 12px">{} prob.</p>\n <p style="margin-left: 50px; margin-top: -16px; font-size: 12px">Real Target: {}</p>\n <p style="margin-left: 50px; margin-top: -16px; font-size: 12px">Filename: {}</p>\n </div>\n </div>\n '.format(img_to_display(filename), gender_icon, gender, '{0:.2f}%'.format(round(torch.max(prediction).item() * 100, 2)), gender_target[target.item()], filename)
display(HTML(display_html))
def visualize_results(model, images, labels):
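# Run the model on a few test images and save each one under test-visualisation-images/ for inspection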
model.eval()
preds = model(images).softmax(dim=-1)
for idx, (image, pred, label) in enumerate(zip(images, preds, labels)):
test_vis_image_dir = Path('test-visualisation-images')
if not test_vis_image_dir.exists():
test_vis_image_dir.mkdir()
filename = test_vis_image_dir / f'{idx}.jpg'
torchvision.utils.save_image(image, filename)
display_result(filename, pred, label)
data_mod.setup()
train_data = data_mod.test_dataloader()
img, label = next(iter(train_data))
img, label = (img[:8], label[:8])
visualize_results(model.cpu(), img, label) | code |
89139708/cell_30 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
from kaggle_secrets import UserSecretsClient
from torch import nn
from torchmetrics import Accuracy
import pytorch_lightning as pl
import torch
import torchvision
import wandb
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
from kaggle_secrets import UserSecretsClient
secret_label = 'wandb'
wandb_key = UserSecretsClient().get_secret(secret_label)
from kaggle_secrets import UserSecretsClient
secret_label = 'wandb'
secret_value = UserSecretsClient().get_secret(secret_label)
wandb.login(key=secret_value)
wandb.finish() | code |
89139708/cell_29 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.trainer import Trainer
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger)
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
data_mod = CelebADataModule(csv_path=csv_path, image_dir=img_path, train_transforms=image_transforms['train'], val_transforms=image_transforms['val'], batch_size=32, num_workers=3)
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch')
trainer = Trainer(gpus=1, max_epochs=6, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer.fit(model, data_mod)
trainer.validate(model, data_mod)
best_model_path = trainer.checkpoint_callback.best_model_path
model = LitModel.load_from_checkpoint(best_model_path, n_classes=2)
trainer.test(model, data_mod) | code |
89139708/cell_26 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.trainer import Trainer
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger)
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
data_mod = CelebADataModule(csv_path=csv_path, image_dir=img_path, train_transforms=image_transforms['train'], val_transforms=image_transforms['val'], batch_size=32, num_workers=3)
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch')
trainer = Trainer(gpus=1, max_epochs=6, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer.fit(model, data_mod)
trainer.validate(model, data_mod) | code |
89139708/cell_16 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
transformed_image = Image.open(single_image_path).convert('RGB')
transformed_image = image_transforms['train'](transformed_image)
print(f'Shape of the image after transform: {transformed_image.shape}') | code |
89139708/cell_3 | [
"text_plain_output_1.png"
] | !pip install --upgrade wandb | code |
89139708/cell_17 | [
"image_output_1.png"
] | from PIL import Image
from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import matplotlib.pyplot as plt
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
def show_image(image_tensor):
image_tensor = torch.permute(image_tensor.cpu(), (1, 2, 0))
plt.axis('off')
plt.imshow(image_tensor)
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
transformed_image = Image.open(single_image_path).convert('RGB')
transformed_image = image_transforms['train'](transformed_image)
show_image(transformed_image) | code |
89139708/cell_24 | [
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
from kaggle_secrets import UserSecretsClient
from torch import nn
from torchmetrics import Accuracy
import pytorch_lightning as pl
import torch
import torchvision
import wandb
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
from kaggle_secrets import UserSecretsClient
secret_label = 'wandb'
wandb_key = UserSecretsClient().get_secret(secret_label)
from kaggle_secrets import UserSecretsClient
secret_label = 'wandb'
secret_value = UserSecretsClient().get_secret(secret_label)
wandb.login(key=secret_value) | code |
89139708/cell_14 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import matplotlib.pyplot as plt
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
def show_image(image_tensor):
image_tensor = torch.permute(image_tensor.cpu(), (1, 2, 0))
plt.axis('off')
plt.imshow(image_tensor)
show_image(orig_image) | code |
89139708/cell_27 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.trainer import Trainer
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
wandb_logger = WandbLogger(project='gender-detection-vit')
model = LitModel(2, wandb_logger=wandb_logger)
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape
image_transforms = {'train': transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()]), 'val': transforms.Compose([transforms.Resize(size=(218, 178)), transforms.ToTensor()])}
data_mod = CelebADataModule(csv_path=csv_path, image_dir=img_path, train_transforms=image_transforms['train'], val_transforms=image_transforms['val'], batch_size=32, num_workers=3)
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch')
trainer = Trainer(gpus=1, max_epochs=6, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer.fit(model, data_mod)
trainer.validate(model, data_mod)
best_model_path = trainer.checkpoint_callback.best_model_path
print(f'Best model {best_model_path}')
model = LitModel.load_from_checkpoint(best_model_path, n_classes=2) | code |
89139708/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
import os
import pytorch_lightning as pl
import torch
import torchvision
import wandb
img_path = Path('../input/celeba-dataset/img_align_celeba/img_align_celeba')
csv_path = Path('../input/celeba-dataset/list_attr_celeba.csv')
class LitModel(pl.LightningModule):
def __init__(self, n_classes, download_pretrained=True, wandb_logger=None, **kwargs):
super().__init__()
self.model = torchvision.models.resnet18(pretrained=True)
self.model.fc = nn.Linear(512, n_classes)
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.loss = nn.CrossEntropyLoss()
self.wandb_logger = wandb_logger
self.opt_params = {'lr': 0.001}
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
loss = self.loss(y_hat_logits, y)
train_acc = self.train_accuracy(y_hat_logits, y)
self.log('train_acc', train_acc, prog_bar=True)
self.log('train_loss', loss)
return {'loss': loss, 'progress_bar': {'train_acc': train_acc}}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('train_avg_loss', avg_train_loss, on_epoch=True)
self.log('train_acc_epoch', self.train_accuracy, on_epoch=True)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.val_accuracy(y_hat_logits, y)
val_loss = self.loss(y_hat_logits, y)
return {'val_loss': val_loss, 'out_logits': y_hat_logits}
def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
if not self.wandb_logger:
return
if batch_idx == 0:
outputs = outputs['out_logits']
n = 20
x, y = batch
images = [img for img in x[:n]]
assert len(outputs.shape) == 2
assert outputs.shape[-1] == 2
preds = outputs[:n].softmax(dim=-1)
pred_prob, pred_class = preds.max(dim=-1)
columns = ['image', 'pred_class', 'true_class', 'pred_prob']
label_to_text = {0: 'Female', 1: 'Male'}
td = []
for image, y_pred_class, y_real, y_pred_prob in zip(x[:n], pred_class, y[:n], pred_prob):
pred_class = label_to_text[y_pred_class.item()]
true_class = label_to_text[y_real.item()]
td.append([wandb.Image(image), pred_class, true_class, y_pred_prob])
self.wandb_logger.log_table(key='samples', columns=columns, data=td)
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log('val_acc_epoch', self.val_accuracy, on_epoch=True, prog_bar=True)
self.log('val_loss_epoch', avg_val_loss, on_epoch=True, prog_bar=True)
return {'val_loss': avg_val_loss}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat_logits = self.forward(x)
self.test_accuracy(y_hat_logits, y)
test_loss = self.loss(y_hat_logits, y)
self.log('test_loss', test_loss, on_epoch=True)
self.log('test_acc', self.test_accuracy, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), **self.opt_params)
return optimizer
class CelebADataset(Dataset):
def __init__(self, image_attr_ordered_map, base_image_dir: str, transforms):
self.image_attr_ordered_map = image_attr_ordered_map
self.base_image_dir = base_image_dir
self.transforms = transforms
def __len__(self):
return len(self.image_attr_ordered_map)
def __getitem__(self, index):
img_name, labels = self.image_attr_ordered_map[index]
image_path = os.path.join(self.base_image_dir, img_name)
image = Image.open(image_path).convert('RGB')
tensor_x = self.transforms(image)
return (tensor_x, labels)
single_image_path = img_path / '000002.jpg'
orig_image = Image.open(single_image_path).convert('RGB')
orig_image = transforms.ToTensor()(orig_image)
orig_image.shape | code |
18139890/cell_42 | [
"text_plain_output_1.png"
] | from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
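# Two-input model: the tokenised comment goes through an Embedding + bidirectional CuDNNLSTM,
# and the LSTM output is concatenated with the 3 handcrafted meta features before the dense classifier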
nlp_input = Input(shape=(max_len,), name='nlp_input')
meta_input = Input(shape=(3,), name='meta_input')
embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
combined_input = concatenate([nlp_out, meta_input])
layer = Dense(256, name='fc1')(combined_input)
layer = Activation('relu')(layer)
layer = Dropout(0.5)(layer)
layer = Dense(2, name='out_layer')(layer)
layer = Activation('softmax')(layer)
model = Model(inputs=[nlp_input, meta_input], outputs=layer)
return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy']) | code |
18139890/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum() | code |
18139890/cell_56 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
import numpy as np
import pandas as pd
import re
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
"""Adding additional informative columns, as most toxic tweets contain
exclamations, capitalized words that can serve as important markers"""
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
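# Note: this pattern matches a broad set of special characters, so 'exclamation_points' below actually counts all of them, not just '!'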
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
features_added = ('capitals', 'exclamation_points', 'total_length')
features_existing = ('target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat', 'funny', 'wow', 'sad', 'likes', 'disagree', 'sexual_explicit', 'identity_annotator_count', 'toxicity_annotator_count')
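# Correlate each engineered feature with the target and the other annotation columns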
rows = [{c: train_data[f].corr(train_data[c]) for c in features_existing} for f in features_added]
train_correlations = pd.DataFrame(rows, index=features_added)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
X = train_data[aux_columns + ['comment_text']]
y = train_data['target']
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
nlp_input = Input(shape=(max_len,), name='nlp_input')
meta_input = Input(shape=(3,), name='meta_input')
embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
combined_input = concatenate([nlp_out, meta_input])
layer = Dense(256, name='fc1')(combined_input)
layer = Activation('relu')(layer)
layer = Dropout(0.5)(layer)
layer = Dense(2, name='out_layer')(layer)
layer = Activation('softmax')(layer)
model = Model(inputs=[nlp_input, meta_input], outputs=layer)
return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
model.fit([sequences_matrix, train_meta_features], y_train, batch_size=128, epochs=5, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])
test_sequences = tok.texts_to_sequences(x_test)
test_sequences_matrix = sequence.pad_sequences(test_sequences, maxlen=max_len)
score = model.evaluate([test_sequences_matrix, test_meta_features], y_test, verbose=True)
X = train_data['comment_text']
y = train_data['target']
data_sequences = tok.texts_to_sequences(X)
data_matrix = sequence.pad_sequences(data_sequences)
y = np_utils.to_categorical(y, num_classes=2)
X_meta_features = np.asarray(train_data[aux_columns])
max_len = data_matrix.shape[1]
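# Retrain from scratch on the full training set (no validation split) before scoring the test data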
model = Meta_RNN()
model.compile(loss='binary_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
model.fit([data_matrix, X_meta_features], y, batch_size=512, epochs=5, validation_split=0.0, verbose=True)
test_data = pd.read_csv('../input/test.csv')
meta_features = np.asarray(test_data[aux_columns])
data = test_data['comment_text']
sequences = tok.texts_to_sequences(data)
test_sequences_matrix = sequence.pad_sequences(sequences, maxlen=max_len)
print(test_sequences_matrix.shape)
print(meta_features.shape) | code |
18139890/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
sns.set_style('darkgrid')
sns.distplot(train_data['target']) | code |
18139890/cell_41 | [
"text_plain_output_1.png"
] | from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
# The engineered meta features and the train/test split come from earlier notebook
# cells; they are recreated here (with assumed split parameters) so the cell runs standalone
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
X = train_data[aux_columns + ['comment_text']]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
print(sequences_matrix.shape)
print(train_meta_features.shape) | code |
18139890/cell_2 | [
"text_html_output_1.png"
] | import os
print(os.listdir('../input')) | code |
18139890/cell_50 | [
"text_plain_output_1.png"
] | from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
# The engineered meta features and the train/test split are recreated from earlier
# notebook cells (split parameters assumed) so this cell runs on its own
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
X = train_data[aux_columns + ['comment_text']]
y = train_data['target']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
    nlp_input = Input(shape=(max_len,), name='nlp_input')
    meta_input = Input(shape=(3,), name='meta_input')
    embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
    nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
    combined_input = concatenate([nlp_out, meta_input])
    layer = Dense(256, name='fc1')(combined_input)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(2, name='out_layer')(layer)
    layer = Activation('softmax')(layer)
    model = Model(inputs=[nlp_input, meta_input], outputs=layer)
    return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
test_sequences = tok.texts_to_sequences(x_test)
test_sequences_matrix = sequence.pad_sequences(test_sequences, maxlen=max_len)
X = train_data['comment_text']
y = train_data['target']
data_sequences = tok.texts_to_sequences(X)
data_matrix = sequence.pad_sequences(data_sequences)
y = np_utils.to_categorical(y, num_classes=2)
X_meta_features = np.asarray(train_data[aux_columns])
print(data_matrix.shape)
print(X_meta_features.shape)
print(y.shape) | code |
18139890/cell_52 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
"""Adding additional informative columns, as most toxic tweets contain
exclamations, capitalized words that can serve as important markers"""
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
features_added = ('capitals', 'exclamation_points', 'total_length')
features_existing = ('target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat', 'funny', 'wow', 'sad', 'likes', 'disagree', 'sexual_explicit', 'identity_annotator_count', 'toxicity_annotator_count')
rows = [{c: train_data[f].corr(train_data[c]) for c in features_existing} for f in features_added]
train_correlations = pd.DataFrame(rows, index=features_added)
test_data = pd.read_csv('../input/test.csv')
test_data.head(5) | code |
18139890/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import numpy as np
import pandas as pd
import seaborn as sns
from nltk.tokenize import word_tokenize
import re
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from sklearn.preprocessing import LabelEncoder
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.optimizers import adam
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping | code |
18139890/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100 | code |
18139890/cell_45 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
# The engineered meta features and the train/test split are recreated from earlier
# notebook cells (split parameters assumed) so this cell runs on its own
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
X = train_data[aux_columns + ['comment_text']]
y = train_data['target']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
    nlp_input = Input(shape=(max_len,), name='nlp_input')
    meta_input = Input(shape=(3,), name='meta_input')
    embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
    nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
    combined_input = concatenate([nlp_out, meta_input])
    layer = Dense(256, name='fc1')(combined_input)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(2, name='out_layer')(layer)
    layer = Activation('softmax')(layer)
    model = Model(inputs=[nlp_input, meta_input], outputs=layer)
    return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
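# Train with a 20% validation split; early stopping halts training once validation
# loss stops improving by at least 1e-4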
model.fit([sequences_matrix, train_meta_features], y_train, batch_size=128, epochs=5, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])
test_sequences = tok.texts_to_sequences(x_test)
test_sequences_matrix = sequence.pad_sequences(test_sequences, maxlen=max_len)
score = model.evaluate([test_sequences_matrix, test_meta_features], y_test, verbose=True)
print('Test loss:', score[0])
print('Test accuracy:', score[1]) | code |
18139890/cell_51 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
# The engineered meta features and the train/test split are recreated from earlier
# notebook cells (split parameters assumed) so this cell runs on its own
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
X = train_data[aux_columns + ['comment_text']]
y = train_data['target']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
    nlp_input = Input(shape=(max_len,), name='nlp_input')
    meta_input = Input(shape=(3,), name='meta_input')
    embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
    nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
    combined_input = concatenate([nlp_out, meta_input])
    layer = Dense(256, name='fc1')(combined_input)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(2, name='out_layer')(layer)
    layer = Activation('softmax')(layer)
    model = Model(inputs=[nlp_input, meta_input], outputs=layer)
    return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
model.fit([sequences_matrix, train_meta_features], y_train, batch_size=128, epochs=5, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])
test_sequences = tok.texts_to_sequences(x_test)
test_sequences_matrix = sequence.pad_sequences(test_sequences, maxlen=max_len)
score = model.evaluate([test_sequences_matrix, test_meta_features], y_test, verbose=True)
X = train_data['comment_text']
y = train_data['target']
data_sequences = tok.texts_to_sequences(X)
data_matrix = sequence.pad_sequences(data_sequences)
y = np_utils.to_categorical(y, num_classes=2)
X_meta_features = np.asarray(train_data[aux_columns])
max_len = data_matrix.shape[1]
model = Meta_RNN()
model.compile(loss='binary_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
model.fit([data_matrix, X_meta_features], y, batch_size=512, epochs=5, validation_split=0.0, verbose=True) | code |
18139890/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5) | code |
18139890/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
import seaborn as sns
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
sns.set_style('darkgrid')
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
"""Adding additional informative columns, as most toxic tweets contain
exclamations, capitalized words that can serve as important markers"""
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
features_added = ('capitals', 'exclamation_points', 'total_length')
features_existing = ('target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat', 'funny', 'wow', 'sad', 'likes', 'disagree', 'sexual_explicit', 'identity_annotator_count', 'toxicity_annotator_count')
rows = [{c: train_data[f].corr(train_data[c]) for c in features_existing} for f in features_added]
train_correlations = pd.DataFrame(rows, index=features_added)
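# Heatmap of how the engineered features correlate with the existing toxicity-related columns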
sns.set()
sns.heatmap(train_correlations) | code |
18139890/cell_43 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import CuDNNLSTM, Activation, Dense, Dropout, Input, Embedding, concatenate, Bidirectional
from keras.models import Sequential, Model
from keras.optimizers import adam
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
train_data = pd.read_csv('../input/train.csv')
train_data.sample(5)
train_data['target'].value_counts()[0] / train_data.shape[0] * 100
train_data.isna().sum()
train_data.drop(['id'], axis=1, inplace=True)
X = train_data['comment_text']
y = train_data['target']
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
aux_columns = ['capitals', 'exclamation_points', 'total_length']
# The engineered meta features and the train/test split are recreated from earlier
# notebook cells (split parameters assumed) so this cell runs on its own
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
train_data['capitals'] = train_data['comment_text'].apply(lambda x: sum((1 for c in x if c.isupper())))
train_data['exclamation_points'] = train_data['comment_text'].apply(lambda x: len(regex.findall(x)))
train_data['total_length'] = train_data['comment_text'].apply(len)
X = train_data[aux_columns + ['comment_text']]
y = train_data['target']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
train_meta_features = np.asarray(x_train[aux_columns])
test_meta_features = np.asarray(x_test[aux_columns])
x_train = x_train['comment_text']
x_test = x_test['comment_text']
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test, num_classes=2)
max_words = 10000
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(x_train)
sequences = tok.texts_to_sequences(x_train)
sequences_matrix = sequence.pad_sequences(sequences)
max_len = sequences_matrix.shape[1]
def Meta_RNN():
    nlp_input = Input(shape=(max_len,), name='nlp_input')
    meta_input = Input(shape=(3,), name='meta_input')
    embedding_layer = Embedding(max_words, 64, input_length=max_len)(nlp_input)
    nlp_out = Bidirectional(CuDNNLSTM(64))(embedding_layer)
    combined_input = concatenate([nlp_out, meta_input])
    layer = Dense(256, name='fc1')(combined_input)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(2, name='out_layer')(layer)
    layer = Activation('softmax')(layer)
    model = Model(inputs=[nlp_input, meta_input], outputs=layer)
    return model
model = Meta_RNN()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001, amsgrad=True), metrics=['accuracy'])
model.fit([sequences_matrix, train_meta_features], y_train, batch_size=128, epochs=5, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)]) | code |
73089284/cell_21 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
logmodel = LogisticRegression(fit_intercept=True)
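# max_iter is set as an attribute after construction and takes effect at fit time,
# giving the solver more iterations to converge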
logmodel.max_iter = 1000
logit_result = logmodel.fit(X_train, Y_train)
ylm_predicted = logit_result.predict(X_test)
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
imp = pd.DataFrame(rf_clf.feature_importances_, index=X_test.columns, columns=['POLICY_PRICE_CHANGE'])
imp.sort_values('POLICY_PRICE_CHANGE').plot(kind='barh', figsize=(12, 8))
titles_options = [('Normalized cm in log regression', 'true')]  # alternative option: ('Confusion matrix, without normalization', None)
for title, normalize in titles_options:
    disp = plot_confusion_matrix(logmodel, X_test, Y_test, cmap=plt.cm.Blues, normalize=normalize)
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)
    plt.show()
titles_options = [('Normalized cm in rf', 'true')]
for title, normalize in titles_options:
    disp = plot_confusion_matrix(rf_clf, X_test, Y_test, cmap=plt.cm.Blues, normalize=normalize)
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)
    plt.show()
73089284/cell_20 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
logmodel = LogisticRegression(fit_intercept=True)
logmodel.max_iter = 1000
logit_result = logmodel.fit(X_train, Y_train)
ylm_predicted = logit_result.predict(X_test)
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
imp = pd.DataFrame(rf_clf.feature_importances_, index=X_test.columns, columns=['POLICY_PRICE_CHANGE'])
imp.sort_values('POLICY_PRICE_CHANGE').plot(kind='barh', figsize=(12, 8))
titles_options = [('Normalized cm in log regression', 'true')]
for title, normalize in titles_options:
    disp = plot_confusion_matrix(logmodel, X_test, Y_test, cmap=plt.cm.Blues, normalize=normalize)
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)
    plt.show()
73089284/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape
df.head() | code |
73089284/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape
df = df.copy()
df.reset_index(drop=True)
df.dropna(axis=0, inplace=True)
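# Convert object (string) columns to integer category codes so they can be fed to scikit-learn models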
obj_columns = df.select_dtypes(['object']).columns
df[obj_columns] = df[obj_columns].apply(lambda x: x.astype('category'))
df[obj_columns] = df[obj_columns].apply(lambda x: x.cat.codes)
df.head(2) | code |
73089284/cell_19 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
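# Rank features by importance in the fitted random forest and plot them as a horizontal bar chart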
imp = pd.DataFrame(rf_clf.feature_importances_, index=X_test.columns, columns=['POLICY_PRICE_CHANGE'])
imp.sort_values('POLICY_PRICE_CHANGE').plot(kind='barh', figsize=(12, 8))
X_test | code |
73089284/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape
df = df.copy()
df.reset_index(drop=True) | code |
73089284/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
imp = pd.DataFrame(rf_clf.feature_importances_, index=X_test.columns, columns=['POLICY_PRICE_CHANGE'])
imp.sort_values('POLICY_PRICE_CHANGE').plot(kind='barh', figsize=(12, 8)) | code |
73089284/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape
df = df.copy()
df.reset_index(drop=True)
df.info() | code |
73089284/cell_16 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
logmodel = LogisticRegression(fit_intercept=True)
logmodel.max_iter = 1000
logit_result = logmodel.fit(X_train, Y_train)
ylm_predicted = logit_result.predict(X_test)
plot_confusion_matrix(logmodel, X_test, Y_test)
plt.show() | code |
73089284/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
logmodel = LogisticRegression(fit_intercept=True)
logmodel.max_iter = 1000
logit_result = logmodel.fit(X_train, Y_train)
ylm_predicted = logit_result.predict(X_test)
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
plot_confusion_matrix(rf_clf, X_test, Y_test)
plt.show() | code |
73089284/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
rf_clf = RandomForestClassifier(bootstrap=False)
result_clf = rf_clf.fit(X_train, Y_train)
yrf_predicted = rf_clf.predict(X_test)
imp = pd.DataFrame(rf_clf.feature_importances_, index=X_test.columns, columns=['POLICY_PRICE_CHANGE'])
imp.sort_values('POLICY_PRICE_CHANGE').plot(kind='barh', figsize=(12, 8))
imp = pd.DataFrame(rf.feature_importances_, index=x_train.columns, columns=['importance'])
imp.sort_values('importance').plot(kind='barh', figsize=(12, 8)) | code |
73089284/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape
df = df.copy()
df.reset_index(drop=True)
df.dropna(axis=0, inplace=True)
obj_columns = df.select_dtypes(['object']).columns
df[obj_columns] = df[obj_columns].apply(lambda x: x.astype('category'))
df[obj_columns] = df[obj_columns].apply(lambda x: x.cat.codes)
df.info() | code |
73089284/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/kasco-dataset-russian/Задача 1/Прогнозирование пролонгации/Данные для задачи.txt', sep=';')
df.shape | code |
122251329/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target
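# Pairwise scatter plots with KDE diagonals for the numeric features, coloured by heart-disease status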
sns.pairplot(hrz[attribs], hue='HeartDisease', diag_kind='kde') | code |
122251329/cell_9 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
hrz.head() | code |
122251329/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target
ncol = 3
nrow = int(np.ceil(len(num_attribs)/ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple='stack', palette='colorblind')  # or use kdeplot
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel('count', fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout()
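# Box-and-whisker plots of each numeric feature, split by heart-disease status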
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
f, axes = plt.subplots(nrow, ncol, figsize=(8, 6))
for name, ax in zip(num_attribs, axes.flatten()):
    sns.boxplot(y=name, x='HeartDisease', data=hrz, orient='v', ax=ax)
f.delaxes(axes[nrow - 1, ncol - 1])
plt.suptitle('Box-and-whisker plot', fontsize=14)
plt.tight_layout() | code |
122251329/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
csv_path = '/kaggle/input/heart-failure-prediction/heart.csv'
hrz = pd.read_csv(csv_path)
target = ['HeartDisease']
num_attribs = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
cat_nom_attribs = ['ChestPainType', 'RestingECG', 'ST_Slope']
cat_bin_attribs = ['Sex', 'FastingBS', 'ExerciseAngina']
cat_attribs = cat_nom_attribs + cat_bin_attribs
attribs = num_attribs + target
ncol = 3
nrow = int(np.ceil(len(num_attribs) / ncol))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 5), facecolor=None)
i = 1
for col in num_attribs:
    plt.subplot(nrow, ncol, i)
    ax = sns.histplot(data=hrz, x=col, hue=target[0], multiple='stack', palette='colorblind')
    ax.set_xlabel(col, fontsize=12)
    ax.set_ylabel('count', fontsize=12)
    sns.despine(right=True)
    sns.despine(offset=0, trim=False)
    i += 1
fig.delaxes(axs[nrow - 1, ncol - 1])
plt.suptitle('Distribution of Numerical Features', fontsize=14)
plt.tight_layout() | code |