path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
129012199/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
# north_american_median = df['NA_Sales'].median()
# print(north_american_median)
# north_american_median_index = df.index[df.NA_Sales == north_american_median][0]
# print(df.iloc[north_american_median_index-5:north_american_median_index+5]['Name'])
median_value = df["NA_Sales"].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values("NA_Sales", ascending=False)
print(median_value)
new_df
top_seller = df.head(1)
top_seller
platform_avgs = df.groupby(by='Platform')['Global_Sales'].mean().sort_values(ascending=False)
platform_avgs
df[['Name', 'JP_Sales']].nlargest(20, 'JP_Sales') | code |
129012199/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129012199/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform | code |
129012199/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
# north_american_median = df['NA_Sales'].median()
# print(north_american_median)
# north_american_median_index = df.index[df.NA_Sales == north_american_median][0]
# print(df.iloc[north_american_median_index-5:north_american_median_index+5]['Name'])
median_value = df["NA_Sales"].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values("NA_Sales", ascending=False)
print(median_value)
new_df
top_seller = df.head(1)
top_seller | code |
129012199/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df | code |
129012199/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub
most_common_platform = df.groupby('Platform').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_platform
most_common_genre = df.groupby('Genre').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_genre
top_games = df.sort_values('Global_Sales', ascending=False)
# north_american_median = df['NA_Sales'].median()
# print(north_american_median)
# north_american_median_index = df.index[df.NA_Sales == north_american_median][0]
# print(df.iloc[north_american_median_index-5:north_american_median_index+5]['Name'])
median_value = df["NA_Sales"].median()
df_sorted = df.iloc[(df['NA_Sales'] - median_value).abs().argsort()][::-1].reset_index(drop=True)
diff = (df['NA_Sales'] - median_value).abs()
df_sorted = df.assign(diff=diff).sort_values(['diff', 'NA_Sales'])
above_median_indices = df_sorted[df_sorted['NA_Sales'] > median_value].head(5).index
below_median_indices = df_sorted[df_sorted['NA_Sales'] < median_value].head(5).index
new_df = df.loc[above_median_indices.union(below_median_indices)].sort_values("NA_Sales", ascending=False)
print(median_value)
new_df
top_seller = df.head(1)
top_seller
platform_avgs = df.groupby(by='Platform')['Global_Sales'].mean().sort_values(ascending=False)
platform_avgs | code |
129012199/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df
most_common_pub = df.groupby('Publisher').count().sort_values(by='Rank', ascending=False).head(1).index[0]
most_common_pub | code |
16115005/cell_13 | [
"text_html_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
from sklearn.tree import DecisionTreeRegressor
fraud_model = DecisionTreeRegressor(random_state=1)
fraud_model.fit(X, y) | code |
16115005/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns | code |
16115005/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
fraud_data.head() | code |
16115005/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
print('Setup Complete') | code |
16115005/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
X.head() | code |
16115005/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
from sklearn.tree import DecisionTreeRegressor
fraud_model = DecisionTreeRegressor(random_state=1)
fraud_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_fraud = fraud_model.predict(X)
mean_absolute_error(y, predicted_fraud)
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
fraud_model = DecisionTreeRegressor()
fraud_model.fit(train_X, train_y)
val_predictions = fraud_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions)) | code |
16115005/cell_15 | [
"text_html_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
from sklearn.tree import DecisionTreeRegressor
fraud_model = DecisionTreeRegressor(random_state=1)
fraud_model.fit(X, y)
print('Making predictions for the following 5 transactions:')
print(X.head())
print('The predictions are')
print(fraud_model.predict(X.head())) | code |
16115005/cell_17 | [
"text_html_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
from sklearn.tree import DecisionTreeRegressor
fraud_model = DecisionTreeRegressor(random_state=1)
fraud_model.fit(X, y)
from sklearn.metrics import mean_absolute_error
predicted_fraud = fraud_model.predict(X)
mean_absolute_error(y, predicted_fraud) | code |
16115005/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
fraud_data_path = '../input/credit_fraud_sytn.csv'
fraud_data = pd.read_csv(fraud_data_path)
fraud_data.columns
y = fraud_data.isFradulent
feature_names = ['Average Amount/transaction/day', 'Transaction_amount', 'isForeignTransaction', 'isHighRiskCountry']
X = fraud_data[feature_names]
X.describe() | code |
34133327/cell_42 | [
"text_plain_output_1.png"
] | from scipy import stats
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
customers_label = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_label['Gender'] = le.fit_transform(customers_label['Gender'])
customers_one_hot = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_one_hot = pd.get_dummies(customers_one_hot)
customers_one_hot.head() | code |
34133327/cell_21 | [
"text_html_output_1.png"
] | from scipy import stats
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]] | code |
34133327/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.isnull().sum()
customers_null.dropna()
customers_null.fillna(0)
customers_null.describe() | code |
34133327/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.head(10) | code |
34133327/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.info() | code |
34133327/cell_33 | [
"image_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
replace_map = {'Gender': {'Male': 1, 'Female': 2}}
labels = cat_df_customers['Gender'].astype('category').cat.categories.tolist()
replace_map_comp = {'Gender': {k: v for k, v in zip(labels, list(range(1, len(labels) + 1)))}}
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_replace.replace(replace_map_comp, inplace=True)
cat_df_customers_replace.head() | code |
34133327/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum() | code |
34133327/cell_40 | [
"text_html_output_1.png"
] | from scipy import stats
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
customers_label = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_label['Gender'] = le.fit_transform(customers_label['Gender'])
customers_label.head(10) | code |
34133327/cell_29 | [
"image_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers.head() | code |
34133327/cell_26 | [
"text_html_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
customers.hist('Age', bins=35)
plt.title('Distribuição dos clientes pela idade')
plt.xlabel('Idade') | code |
34133327/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.isnull().sum()
customers_null.dropna() | code |
34133327/cell_45 | [
"text_html_output_1.png"
] | from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
customers_label = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_label['Gender'] = le.fit_transform(customers_label['Gender'])
customers_one_hot = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_one_hot = pd.get_dummies(customers_one_hot)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_one_hot = customers
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
customers_ohe = ohe.fit_transform(customers_one_hot['Gender'].values.reshape(-1, 1)).toarray()
customers_ohe.shape | code |
34133327/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)']) | code |
34133327/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.info() | code |
34133327/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers.describe() | code |
34133327/cell_38 | [
"text_html_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes
cat_df_customers_lc['Gender'] = cat_df_customers_lc['Gender'].cat.codes
cat_df_customers_lc.head() | code |
34133327/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.head() | code |
34133327/cell_31 | [
"text_html_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
replace_map = {'Gender': {'Male': 1, 'Female': 2}}
labels = cat_df_customers['Gender'].astype('category').cat.categories.tolist()
replace_map_comp = {'Gender': {k: v for k, v in zip(labels, list(range(1, len(labels) + 1)))}}
print(replace_map_comp) | code |
34133327/cell_46 | [
"text_html_output_1.png"
] | from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
customers_label = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_label['Gender'] = le.fit_transform(customers_label['Gender'])
customers_one_hot = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_one_hot = pd.get_dummies(customers_one_hot)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers_one_hot = customers
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
customers_ohe = ohe.fit_transform(customers_one_hot['Gender'].values.reshape(-1, 1)).toarray()
customers_ohe.shape
customers_ohe | code |
34133327/cell_24 | [
"text_html_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
sns.countplot(x='Gender', data=customers)
plt.title('Distribuição dos clientes quanto ao gênero') | code |
34133327/cell_14 | [
"text_html_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.isnull().sum()
customers_null.dropna()
customers_null.fillna(0)
customers_null.fillna(customers_null.mean()) | code |
34133327/cell_22 | [
"text_html_output_1.png"
] | from scipy import stats
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
df_salario_outlier | code |
34133327/cell_10 | [
"text_html_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.isnull().sum() | code |
34133327/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import matplotlib.pyplot as plt #biblioteca para criar gráficos "comuns" ao estilo Matplot
import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
import seaborn as sns #biblioteca utilizada para cirar gráficos mais "bonitos"
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
boxplot = customers.boxplot(column=['Age', 'Annual Income (k$)', 'Spending Score (1-100)'])
#constroi o boxplot para as colunas desejadas
from scipy import stats
z = np.abs(stats.zscore(customers['Annual Income (k$)'].values))
thereshold = 2
result = np.where(z > thereshold)
df_salario_outlier = customers.iloc[result[0]]
cat_df_customers = customers.select_dtypes(include=['object'])
cat_df_customers_replace = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
cat_df_customers_lc = customers
cat_df_customers_lc['Gender'] = pd.Categorical(cat_df_customers_lc['Gender'])
cat_df_customers_lc.dtypes | code |
34133327/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np #biblioteca utilizada para trabalhar com vetores
import pandas as pd #biblioteca para trabalhar com dataframes (planilhas excel)
customers = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
customers.isnull().sum()
customers_null = customers
for col in customers_null.columns:
customers_null.loc[customers_null.sample(frac=0.1).index, col] = np.nan
customers_null.isnull().sum()
customers_null.dropna()
customers_null.fillna(0) | code |
18102956/cell_13 | [
"text_plain_output_1.png"
] | from scipy.interpolate import griddata
from sklearn.metrics import mean_squared_error
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors import KNeighborsRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindf = pd.read_csv('../input/train.csv')
testdf = pd.read_csv('../input/test.csv')
totalRows = traindf[' y'].max()
totalCols = traindf['x'].max()
trainData = traindf.iloc[:, 2:]
trainlabels = traindf.iloc[:, 0:2]
testData = testdf.iloc[:, 2:]
testlabels = testdf.iloc[:, 0:2]
dimOfdata = trainData.shape[1]
numberOfSamples = len(trainlabels)
train_X = trainData.values
train_Y = trainlabels.values
test_X = testData.values
test_Y = testlabels.values
from scipy.interpolate import griddata
room_y = np.zeros(((totalRows + 1) * (totalCols + 1), 2))
k = 0
for i in range(totalCols + 1):
for j in range(totalRows + 1):
room_y[k, :] = [i, j]
k += 1
room_x = griddata(train_Y, train_X, room_y, method='cubic')
room_x = np.nan_to_num(room_x)
k = 4
model = MultiOutputRegressor(KNeighborsRegressor(k))
model.fit(train_x, train_y)
pred_results = model.predict(val_x)
resultdf = pd.read_csv('../input/sample_submission.csv')
realy, realx = (resultdf.iloc[:, 0], resultdf.iloc[:, 1])
testdf[' y'] = pred_results[:, 1]
testdf['x'] = pred_results[:, 0]
data = {'realy': realy, 'realx': realx, 'predicty': pred_results[:, 0], 'predictx': pred_results[:, 1]}
df = pd.DataFrame(data)
from sklearn.metrics import mean_squared_error
predicts = testdf.iloc[:, 0:2]
trues = resultdf.iloc[:, 0:2]
print('total mse: ', mean_squared_error(trues, predicts))
df['mse'] = df.apply(lambda x: mean_squared_error([x['realx'], x['realy']], [x['predictx'], x['predicty']]), axis=1)
df.to_csv('./submission.csv')
df.head() | code |
18102956/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
from sklearn.multioutput import MultiOutputRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
import matplotlib.pyplot as plt | code |
18102956/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from scipy.interpolate import griddata
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors import KNeighborsRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindf = pd.read_csv('../input/train.csv')
testdf = pd.read_csv('../input/test.csv')
totalRows = traindf[' y'].max()
totalCols = traindf['x'].max()
trainData = traindf.iloc[:, 2:]
trainlabels = traindf.iloc[:, 0:2]
testData = testdf.iloc[:, 2:]
testlabels = testdf.iloc[:, 0:2]
dimOfdata = trainData.shape[1]
numberOfSamples = len(trainlabels)
train_X = trainData.values
train_Y = trainlabels.values
test_X = testData.values
test_Y = testlabels.values
from scipy.interpolate import griddata
room_y = np.zeros(((totalRows + 1) * (totalCols + 1), 2))
k = 0
for i in range(totalCols + 1):
for j in range(totalRows + 1):
room_y[k, :] = [i, j]
k += 1
room_x = griddata(train_Y, train_X, room_y, method='cubic')
room_x = np.nan_to_num(room_x)
k = 4
model = MultiOutputRegressor(KNeighborsRegressor(k))
model.fit(train_x, train_y) | code |
18102956/cell_3 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindf = pd.read_csv('../input/train.csv')
testdf = pd.read_csv('../input/test.csv')
totalRows = traindf[' y'].max()
totalCols = traindf['x'].max()
print('room size ', totalRows, ' ', totalCols)
trainData = traindf.iloc[:, 2:]
trainlabels = traindf.iloc[:, 0:2]
testData = testdf.iloc[:, 2:]
testlabels = testdf.iloc[:, 0:2]
dimOfdata = trainData.shape[1]
print('number of features: ', dimOfdata)
numberOfSamples = len(trainlabels)
print('total samples: ', numberOfSamples) | code |
18102956/cell_14 | [
"text_plain_output_1.png"
] | from scipy.interpolate import griddata
from sklearn.metrics import mean_squared_error
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors import KNeighborsRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindf = pd.read_csv('../input/train.csv')
testdf = pd.read_csv('../input/test.csv')
totalRows = traindf[' y'].max()
totalCols = traindf['x'].max()
trainData = traindf.iloc[:, 2:]
trainlabels = traindf.iloc[:, 0:2]
testData = testdf.iloc[:, 2:]
testlabels = testdf.iloc[:, 0:2]
dimOfdata = trainData.shape[1]
numberOfSamples = len(trainlabels)
train_X = trainData.values
train_Y = trainlabels.values
test_X = testData.values
test_Y = testlabels.values
from scipy.interpolate import griddata
room_y = np.zeros(((totalRows + 1) * (totalCols + 1), 2))
k = 0
for i in range(totalCols + 1):
for j in range(totalRows + 1):
room_y[k, :] = [i, j]
k += 1
room_x = griddata(train_Y, train_X, room_y, method='cubic')
room_x = np.nan_to_num(room_x)
k = 4
model = MultiOutputRegressor(KNeighborsRegressor(k))
model.fit(train_x, train_y)
pred_results = model.predict(val_x)
resultdf = pd.read_csv('../input/sample_submission.csv')
realy, realx = (resultdf.iloc[:, 0], resultdf.iloc[:, 1])
testdf[' y'] = pred_results[:, 1]
testdf['x'] = pred_results[:, 0]
data = {'realy': realy, 'realx': realx, 'predicty': pred_results[:, 0], 'predictx': pred_results[:, 1]}
df = pd.DataFrame(data)
from sklearn.metrics import mean_squared_error
predicts = testdf.iloc[:, 0:2]
trues = resultdf.iloc[:, 0:2]
df['mse'] = df.apply(lambda x: mean_squared_error([x['realx'], x['realy']], [x['predictx'], x['predicty']]), axis=1)
df.to_csv('./submission.csv')
imageSize = (totalRows + 1, totalCols + 1)
testlabels = resultdf.iloc[:, 0:2]
def getRouteImage():
    # Draw the ground-truth route: mid-grey (128) background with a darker
    # pixel (80) at every labelled position.
    # NOTE(review): relies on module-level `imageSize` and `testlabels`
    # (a DataFrame with columns ' y' and 'x') — confirm against the cell above.
    canvas = np.full(imageSize, 128)
    for _, row in testlabels.iterrows():
        canvas[row[' y']][row['x']] = 80
    return canvas
image = getRouteImage()
for predict_y, predict_x in predicts.iloc[:, 0:2].values:
predict_y, predict_x = (to_int(predict_y), to_int(predict_x))
image[predict_x][predict_y] = 255
plt.figure(figsize=(20, 20))
plt.title('images')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') | code |
121150304/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.dtypes.value_counts().plot.pie(autopct='%0.2f%%') | code |
121150304/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.info() | code |
121150304/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum()
print('************************************************\n',train['cut'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='cut')
ax.bar_label(ax.containers[0])
plt.show()
print('************************************************\n',train['color'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='color')
ax.bar_label(ax.containers[0])
plt.show()
print('************************************************\n',train['clarity'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='clarity')
ax.bar_label(ax.containers[0])
plt.show()
plt.boxplot(list(train['carat'].value_counts().keys()))
plt.title('Carat')
plt.show() | code |
121150304/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
import time
import seaborn as sns
from sklearn import metrics
import os
from lightgbm import LGBMClassifier | code |
121150304/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum()
print('************************************************\n',train['cut'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='cut')
ax.bar_label(ax.containers[0])
plt.show()
print('************************************************\n',train['color'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='color')
ax.bar_label(ax.containers[0])
plt.show()
print('************************************************\n', train['clarity'].value_counts(), '\n************************************************')
plt.figure(figsize=(6, 5))
ax = sns.countplot(data=train, x='clarity')
ax.bar_label(ax.containers[0])
plt.show() | code |
121150304/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum()
print('************************************************\n',train['cut'].value_counts(), '\n************************************************')
plt.figure(figsize=(6,5))
ax = sns.countplot(data=train, x='cut')
ax.bar_label(ax.containers[0])
plt.show()
print('************************************************\n', train['color'].value_counts(), '\n************************************************')
plt.figure(figsize=(6, 5))
ax = sns.countplot(data=train, x='color')
ax.bar_label(ax.containers[0])
plt.show() | code |
121150304/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
test | code |
121150304/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum()
print('************************************************\n', train['cut'].value_counts(), '\n************************************************')
plt.figure(figsize=(6, 5))
ax = sns.countplot(data=train, x='cut')
ax.bar_label(ax.containers[0])
plt.show() | code |
121150304/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train | code |
121150304/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum()
plt.figure(figsize=(15, 7))
sns.heatmap(train.isnull(), cmap='viridis') | code |
121150304/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/playground-series-s3e8/test.csv')
train = pd.read_csv('/kaggle/input/playground-series-s3e8/train.csv')
sub = pd.read_csv('/kaggle/input/playground-series-s3e8/sample_submission.csv')
train.isnull().sum() | code |
122244134/cell_13 | [
"text_plain_output_1.png"
] | from pathlib import Path
from sentence_transformers import SentenceTransformer
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
class CFG:
    """Inference configuration for the ViT image-to-prompt-embedding model."""
    # Checkpoint produced by the companion training notebook.
    model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
    # timm architecture name; must match the architecture the checkpoint was trained with.
    model_name = 'vit_base_patch16_224'
    # Square resolution images are resized to before the forward pass.
    input_size = 224
    # Number of images per inference batch.
    batch_size = 64
def predict(images, model_path, model_name, input_size, batch_size):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
dataset = DiffusionTestDataset(images, transform)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
model = timm.create_model(model_name, pretrained=False, num_classes=384)
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.to(device)
model.eval()
preds = []
for X in tqdm(dataloader, leave=False):
X = X.to(device)
with torch.no_grad():
X_out = model(X)
preds.append(X_out.cpu().numpy())
return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId')
prompts = pd.read_csv('/kaggle/input/stable-diffusion-image-to-prompts/prompts.csv')
prompts = prompts.loc[prompts['imgId'].isin(imgIds)].set_index('imgId').loc[imgIds].reset_index()
st_model = SentenceTransformer('/kaggle/input/sentence-transformers-222/all-MiniLM-L6-v2')
prompts_true = st_model.encode(prompts['prompt']) | code |
122244134/cell_9 | [
"text_plain_output_1.png"
] | from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
class CFG:
model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
model_name = 'vit_base_patch16_224'
input_size = 224
batch_size = 64
def predict(images, model_path, model_name, input_size, batch_size):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
dataset = DiffusionTestDataset(images, transform)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
model = timm.create_model(model_name, pretrained=False, num_classes=384)
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.to(device)
model.eval()
preds = []
for X in tqdm(dataloader, leave=False):
X = X.to(device)
with torch.no_grad():
X_out = model(X)
preds.append(X_out.cpu().numpy())
return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId') | code |
122244134/cell_23 | [
"text_html_output_1.png"
] | prompt_embeddings.shape | code |
122244134/cell_19 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from scipy import spatial
from sentence_transformers import SentenceTransformer
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
import torch
class CFG:
model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
model_name = 'vit_base_patch16_224'
input_size = 224
batch_size = 64
class DiffusionTestDataset(Dataset):
    """Map-style dataset over image paths: open and transform one image per index."""

    def __init__(self, images, transform):
        # Keep the path list and the preprocessing pipeline verbatim.
        self.images = images
        self.transform = transform

    def __len__(self):
        # One sample per image path.
        return len(self.images)

    def __getitem__(self, idx):
        # Load lazily on access and apply the transform in one step.
        return self.transform(Image.open(self.images[idx]))
def predict(images, model_path, model_name, input_size, batch_size):
    """Embed a list of images into 384-d prompt vectors using a timm ViT.

    Args:
        images: list of image file paths.
        model_path: checkpoint file holding the trained state_dict.
        model_name: timm architecture name (must match the checkpoint).
        input_size: square resolution images are resized to.
        batch_size: number of images per forward pass.

    Returns:
        Tuple of (all predictions flattened to 1-D, raw (batch, 384) array
        of the FIRST dataloader batch only).  NOTE(review): the second
        element is correct only when every image fits in one batch —
        confirm for inputs larger than batch_size.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # ImageNet mean/std normalization — must match what training used.
    transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    dataset = DiffusionTestDataset(images, transform)
    # shuffle=False keeps predictions aligned with the input path order.
    dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
    # Build the architecture untrained, then load the fine-tuned weights.
    model = timm.create_model(model_name, pretrained=False, num_classes=384)
    state_dict = torch.load(model_path)
    model.load_state_dict(state_dict)
    model.to(device)
    model.eval()
    preds = []
    for X in tqdm(dataloader, leave=False):
        X = X.to(device)
        with torch.no_grad():
            X_out = model(X)
        preds.append(X_out.cpu().numpy())
    return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId')
prompts = pd.read_csv('/kaggle/input/stable-diffusion-image-to-prompts/prompts.csv')
prompts = prompts.loc[prompts['imgId'].isin(imgIds)].set_index('imgId').loc[imgIds].reset_index()
st_model = SentenceTransformer('/kaggle/input/sentence-transformers-222/all-MiniLM-L6-v2')
prompts_true = st_model.encode(prompts['prompt'])
def simularity(y_pred, y_true):
    """Mean cosine similarity between paired rows of y_pred and y_true.

    Args:
        y_pred: iterable of predicted embedding vectors.
        y_true: iterable of reference embedding vectors, paired with y_pred.

    Returns:
        float: average of ``1 - cosine_distance`` over all pairs, or 0.0 for
        empty input.
    """
    cosine_sim = []
    for pred, true in zip(y_pred, y_true):
        # spatial.distance.cosine returns the distance, so 1 - d is similarity.
        cosine_sim.append(1 - spatial.distance.cosine(pred, true))
    # Bug fix: the original computed the scores but never returned them, so the
    # call sites (used as a cell's displayed expression) always saw None.
    return float(np.mean(cosine_sim)) if cosine_sim else 0.0
import torch
from torch import nn
from torch.optim import Adam
class SimModel(nn.Module):
    """Two-layer MLP: 384 -> hid_dim -> 384 with a ReLU between the layers."""

    def __init__(self, hid_dim):
        super().__init__()
        # Attribute names are kept so existing state_dicts remain loadable.
        self.fc1 = nn.Linear(384, hid_dim)
        self.fc2 = nn.Linear(hid_dim, 384)
        self.act = nn.ReLU()

    def forward(self, x):
        # fc1 -> ReLU -> fc2, expressed as one chained call.
        return self.fc2(self.act(self.fc1(x)))
input = torch.FloatTensor(raw_preds)
target = torch.FloatTensor(prompts_true)
model = SimModel(128)
criterion = nn.MSELoss()
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.01)
for i in range(100):
pred = model(input)
optimizer.zero_grad()
loss = criterion(pred, target - input)
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
result = model(input) + input
result = result.numpy()
simularity(result, prompts_true) | code |
122244134/cell_16 | [
"text_plain_output_1.png"
] | from pathlib import Path
from scipy import spatial
from sentence_transformers import SentenceTransformer
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
class CFG:
model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
model_name = 'vit_base_patch16_224'
input_size = 224
batch_size = 64
def predict(images, model_path, model_name, input_size, batch_size):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
dataset = DiffusionTestDataset(images, transform)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
model = timm.create_model(model_name, pretrained=False, num_classes=384)
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.to(device)
model.eval()
preds = []
for X in tqdm(dataloader, leave=False):
X = X.to(device)
with torch.no_grad():
X_out = model(X)
preds.append(X_out.cpu().numpy())
return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId')
prompts = pd.read_csv('/kaggle/input/stable-diffusion-image-to-prompts/prompts.csv')
prompts = prompts.loc[prompts['imgId'].isin(imgIds)].set_index('imgId').loc[imgIds].reset_index()
st_model = SentenceTransformer('/kaggle/input/sentence-transformers-222/all-MiniLM-L6-v2')
prompts_true = st_model.encode(prompts['prompt'])
def simularity(y_pred, y_true):
cosine_sim = []
for pred, true in zip(y_pred, y_true):
cosine_sim.append(1 - spatial.distance.cosine(pred, true))
simularity(raw_preds, prompts_true) | code |
122244134/cell_17 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from sentence_transformers import SentenceTransformer
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
import torch
class CFG:
model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
model_name = 'vit_base_patch16_224'
input_size = 224
batch_size = 64
class DiffusionTestDataset(Dataset):
def __init__(self, images, transform):
self.images = images
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
image = Image.open(self.images[idx])
image = self.transform(image)
return image
def predict(images, model_path, model_name, input_size, batch_size):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
dataset = DiffusionTestDataset(images, transform)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
model = timm.create_model(model_name, pretrained=False, num_classes=384)
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.to(device)
model.eval()
preds = []
for X in tqdm(dataloader, leave=False):
X = X.to(device)
with torch.no_grad():
X_out = model(X)
preds.append(X_out.cpu().numpy())
return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId')
prompts = pd.read_csv('/kaggle/input/stable-diffusion-image-to-prompts/prompts.csv')
prompts = prompts.loc[prompts['imgId'].isin(imgIds)].set_index('imgId').loc[imgIds].reset_index()
st_model = SentenceTransformer('/kaggle/input/sentence-transformers-222/all-MiniLM-L6-v2')
prompts_true = st_model.encode(prompts['prompt'])
import torch
from torch import nn
from torch.optim import Adam
class SimModel(nn.Module):
def __init__(self, hid_dim):
super().__init__()
self.fc1 = nn.Linear(384, hid_dim)
self.fc2 = nn.Linear(hid_dim, 384)
self.act = nn.ReLU()
def forward(self, x):
x = self.act(self.fc1(x))
x = self.fc2(x)
return x
input = torch.FloatTensor(raw_preds)
target = torch.FloatTensor(prompts_true)
model = SimModel(128)
criterion = nn.MSELoss()
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.01)
for i in range(100):
pred = model(input)
optimizer.zero_grad()
loss = criterion(pred, target - input)
loss.backward()
optimizer.step()
if i % 10 == 9:
print(loss) | code |
122244134/cell_22 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from sentence_transformers import SentenceTransformer
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import timm
import torch
import torch
class CFG:
model_path = '/kaggle/input/stable-diffusion-vit-baseline-train/vit_base_patch16_224.pth'
model_name = 'vit_base_patch16_224'
input_size = 224
batch_size = 64
class DiffusionTestDataset(Dataset):
def __init__(self, images, transform):
self.images = images
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
image = Image.open(self.images[idx])
image = self.transform(image)
return image
def predict(images, model_path, model_name, input_size, batch_size):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.Resize(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
dataset = DiffusionTestDataset(images, transform)
dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=2, drop_last=False)
model = timm.create_model(model_name, pretrained=False, num_classes=384)
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.to(device)
model.eval()
preds = []
for X in tqdm(dataloader, leave=False):
X = X.to(device)
with torch.no_grad():
X_out = model(X)
preds.append(X_out.cpu().numpy())
return (np.vstack(preds).flatten(), np.array(preds)[0])
images = list(Path('/kaggle/input/stable-diffusion-image-to-prompts/images').glob('*.png'))
imgIds = [i.stem for i in images]
EMBEDDING_LENGTH = 384
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, EMBEDDING_LENGTH), np.tile(range(EMBEDDING_LENGTH), len(imgIds)))]
prompt_embeddings, raw_preds = predict(images, CFG.model_path, CFG.model_name, CFG.input_size, CFG.batch_size)
submission = pd.DataFrame(index=imgId_eId, data=prompt_embeddings, columns=['val']).rename_axis('imgId_eId')
prompts = pd.read_csv('/kaggle/input/stable-diffusion-image-to-prompts/prompts.csv')
prompts = prompts.loc[prompts['imgId'].isin(imgIds)].set_index('imgId').loc[imgIds].reset_index()
st_model = SentenceTransformer('/kaggle/input/sentence-transformers-222/all-MiniLM-L6-v2')
prompts_true = st_model.encode(prompts['prompt'])
import torch
from torch import nn
from torch.optim import Adam
class SimModel(nn.Module):
    """Two-layer MLP mapping a 384-d embedding to a 384-d output.

    Used as a small correction head: 384 -> hid_dim -> ReLU -> 384.
    """

    def __init__(self, hid_dim):
        super().__init__()
        self.fc1 = nn.Linear(384, hid_dim)
        self.fc2 = nn.Linear(hid_dim, 384)
        self.act = nn.ReLU()

    def forward(self, x):
        """Apply fc1 -> ReLU -> fc2 to a batch of shape (N, 384)."""
        return self.fc2(self.act(self.fc1(x)))
# Fit a small residual head: learn the difference between the vision model's
# predicted embeddings and the true prompt embeddings on the sample images.
# Renamed `input` -> `base` — the original shadowed the builtin `input`.
base = torch.FloatTensor(raw_preds)
target = torch.FloatTensor(prompts_true)
model = SimModel(128)
criterion = nn.MSELoss()
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.01)
for i in range(100):
    pred = model(base)
    optimizer.zero_grad()
    # The head is trained to predict the residual (target - base),
    # not the target itself.
    loss = criterion(pred, target - base)
    loss.backward()
    optimizer.step()
model.eval()
with torch.no_grad():
    # Final embedding = original prediction + learned residual correction.
    result = model(base) + base
result = result.numpy()
submission = pd.DataFrame(index=imgId_eId, data=result.flatten(), columns=['val']).rename_axis('imgId_eId')
submission.to_csv('submission.csv')
submission.head(10) | code |
128022928/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/monthly-milk-production-pounds/monthlyMilkProduction.csv')
df.columns = ['Month', 'Production/Cow']
df = df[:-1]
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True)
display(df.head())
display(df.tail()) | code |
128022928/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
iris = sns.load_dataset('iris')
g = sns.PairGrid(iris, hue='species')
g = g.map_diag(sns.histplot)
g = g.map_offdiag(sns.scatterplot)
g = g.add_legend() | code |
128022928/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
iris = sns.load_dataset('iris')
display(iris) | code |
128022928/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
sns.boxplot(data=titanic, x='class', y='fare') | code |
128022928/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from statsmodels.tsa.seasonal import seasonal_decompose
import pandas as pd
df = pd.read_csv('/kaggle/input/monthly-milk-production-pounds/monthlyMilkProduction.csv')
df.columns = ['Month', 'Production/Cow']
df = df[:-1]
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True)
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df, model='additive')
result.plot() | code |
128022928/cell_6 | [
"text_plain_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
sns.lineplot(data=titanic, y='fare', x='embarked') | code |
128022928/cell_29 | [
"text_plain_output_1.png"
] | from statsmodels.tsa.arima.model import ARIMA
model = ARIMA(train, order=(1, 1, 2))
model_fit = model.fit() | code |
128022928/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
iris = sns.load_dataset('iris')
sns.lmplot(data=iris, x='sepal_width', y='petal_width') | code |
128022928/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
sns.barplot(data=titanic, x='fare', y='embarked') | code |
128022928/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
corpus = ['I live in Mumbai', 'I like Mumbai', 'I dont like Pune']
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names_out())
print(X) | code |
128022928/cell_3 | [
"text_plain_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
display(titanic) | code |
128022928/cell_17 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
corpus = ['I live in Mumbai', 'I like Mumbai', 'I dont like Pune']
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
corpus = ['you were born with potential', 'you were born with goodness and trust', 'you were born with ideals and dreams', 'you were born with greatness', 'you were born with wings', "you are not meant for crawling, so don't", 'you have wings', 'learn to use them and fly']
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names_out())
print(X) | code |
128022928/cell_31 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima.model import ARIMA
model = ARIMA(train, order=(1, 1, 2))
model_fit = model.fit()
predictions = model_fit.forecast(steps=len(test))[0]
rmse = sqrt(mean_squared_error(test.values, predictions))
print(f'RMSE: {rmse}') | code |
128022928/cell_24 | [
"image_output_1.png"
] | from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
import pandas as pd
df = pd.read_csv('/kaggle/input/monthly-milk-production-pounds/monthlyMilkProduction.csv')
df.columns = ['Month', 'Production/Cow']
df = df[:-1]
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True)
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df, model='additive')
from statsmodels.tsa.stattools import adfuller
result = adfuller(df)
print(f'ADF Statistic: {result[0]}')
print(f'p-value: {result[1]}') | code |
128022928/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/monthly-milk-production-pounds/monthlyMilkProduction.csv')
df.columns = ['Month', 'Production/Cow']
df = df[:-1]
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True)
df.plot() | code |
128022928/cell_10 | [
"text_html_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
iris = sns.load_dataset('iris')
sns.scatterplot(data=iris, x='sepal_length', y='petal_length').set_title('scatter plot for “sepal_length” and “petal_length”') | code |
128022928/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
iris = sns.load_dataset('iris')
sns.regplot(data=iris, x='petal_length', y='petal_width').set_title('reg plot between petal_width and petal_length') | code |
128022928/cell_5 | [
"image_output_2.png",
"image_output_1.png"
] | import seaborn as sns
titanic = sns.load_dataset('titanic')
sns.violinplot(data=titanic, x='age', y='sex') | code |
90104745/cell_9 | [
"text_plain_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data | code |
90104745/cell_25 | [
"image_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.shape
new_data = data.drop(['sfm', 'kurt', 'meandom', 'meanfreq', 'dfrange', 'modindx'], axis=1)
new_data | code |
90104745/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | n_samples = 1000
n_features = 10
n_classes = 2
n_estimators = 25
max_depth = 10
model = cuRF(max_depth=max_depth, n_estimators=n_estimators, random_state=0)
trained_RF = model.fit(X_train, y_train) | code |
90104745/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.metrics import accuracy_score
import cuml as np
import numpy as np # linear algebra
n_samples = 1000
n_features = 10
n_classes = 2
n_estimators = 25
max_depth = 10
model = cuRF(max_depth=max_depth, n_estimators=n_estimators, random_state=0)
trained_RF = model.fit(X_train, y_train)
predictions = model.predict(X_test)
from cuml.datasets.classification import make_classification
from sklearn.metrics import accuracy_score
cu_score = np.metrics.accuracy_score(y_test, predictions)
sk_score = accuracy_score(y_test.get(), predictions.get())
print(predictions) | code |
90104745/cell_20 | [
"text_plain_output_1.png"
] | !pip install mglearn | code |
90104745/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import mglearn
import pandas
import seaborn
import seaborn
import matplotlib.pyplot as plt
import pandas
df_pandas = pandas.read_csv('../input/voicegender/voice.csv')
# performing the visualization in pandas Data Frame
male = df_pandas.loc[df_pandas['label']=='male']
female = df_pandas.loc[df_pandas['label']=='female']
fig, axes = plt.subplots(10, 2, figsize=(10,20))
ax = axes.ravel()
for i in range(20):
ax[i].hist(male.iloc[:,i], bins=20, color=mglearn.cm3(0), alpha=.5)
ax[i].hist(female.iloc[:, i], bins=20, color=mglearn.cm3(2), alpha=.5)
ax[i].set_title(list(male)[i])
ax[i].set_yticks(())
ax[i].set_xlabel("Feature magnitude")
ax[i].set_ylabel("Frequency")
ax[i].legend(["male", "female"], loc="best")
fig.tight_layout()
ND = df_pandas.drop(['sfm', 'kurt', 'meandom', 'meanfreq', 'dfrange', 'modindx'], axis=1)
ND
plt.figure(figsize=(16, 16))
seaborn.heatmap(ND.corr(), annot=True, cmap='viridis', linewidth=0.5) | code |
90104745/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas
import seaborn
import seaborn
import matplotlib.pyplot as plt
import pandas
df_pandas = pandas.read_csv('../input/voicegender/voice.csv')
plt.figure(figsize=(21, 21))
seaborn.heatmap(df_pandas.corr(), annot=True, cmap='viridis', linewidth=0.5) | code |
90104745/cell_7 | [
"text_plain_output_1.png"
] | import cudf as pd
import cuml as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy
import seaborn as sns
import sklearn
import sys
import sys
import scipy
print('Environment specification:\n')
print('python', '%s.%s.%s' % sys.version_info[:3])
for mod in (np, scipy, sns, sklearn, pd):
print(mod.__name__, mod.__version__) | code |
90104745/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.metrics import accuracy_score
import cuml as np
import numpy as np # linear algebra
n_samples = 1000
n_features = 10
n_classes = 2
n_estimators = 25
max_depth = 10
model = cuRF(max_depth=max_depth, n_estimators=n_estimators, random_state=0)
trained_RF = model.fit(X_train, y_train)
predictions = model.predict(X_test)
from cuml.datasets.classification import make_classification
from sklearn.metrics import accuracy_score
cu_score = np.metrics.accuracy_score(y_test, predictions)
sk_score = accuracy_score(y_test.get(), predictions.get())
print(' cuml accuracy: ', cu_score)
print(' sklearn accuracy : ', sk_score) | code |
90104745/cell_28 | [
"text_html_output_1.png"
] | from cuml.model_selection import train_test_split
from cuml.model_selection import train_test_split
from sklearn.model_selection import cross_val_score,train_test_split
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.shape
new_data = data.drop(['sfm', 'kurt', 'meandom', 'meanfreq', 'dfrange', 'modindx'], axis=1)
new_data
new_data['label'] = new_data['label'].map({'male': 1, 'female': 0})
print(new_data)
from cuml.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(new_data.iloc[:, :-1].values, new_data.iloc[:, -1].values, test_size=0.2, random_state=42) | code |
90104745/cell_15 | [
"text_plain_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.shape | code |
90104745/cell_16 | [
"text_plain_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.shape
print('Total number of labels: {}'.format(data.shape[0])) | code |
90104745/cell_17 | [
"text_plain_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.shape
print('Number of male: {}'.format(data[data.label == 'male'].shape[0]))
print('Number of female: {}'.format(data[data.label == 'female'].shape[0])) | code |
90104745/cell_31 | [
"text_plain_output_1.png"
] | n_samples = 1000
n_features = 10
n_classes = 2
n_estimators = 25
max_depth = 10
model = cuRF(max_depth=max_depth, n_estimators=n_estimators, random_state=0)
trained_RF = model.fit(X_train, y_train)
predictions = model.predict(X_test) | code |
90104745/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import mglearn
import pandas
import seaborn
import seaborn
import matplotlib.pyplot as plt
import pandas
df_pandas = pandas.read_csv('../input/voicegender/voice.csv')
# performing the visualization in pandas Data Frame
male = df_pandas.loc[df_pandas['label']=='male']
female = df_pandas.loc[df_pandas['label']=='female']
fig, axes = plt.subplots(10, 2, figsize=(10,20))
ax = axes.ravel()
for i in range(20):
ax[i].hist(male.iloc[:,i], bins=20, color=mglearn.cm3(0), alpha=.5)
ax[i].hist(female.iloc[:, i], bins=20, color=mglearn.cm3(2), alpha=.5)
ax[i].set_title(list(male)[i])
ax[i].set_yticks(())
ax[i].set_xlabel("Feature magnitude")
ax[i].set_ylabel("Frequency")
ax[i].legend(["male", "female"], loc="best")
fig.tight_layout()
ND = df_pandas.drop(['sfm', 'kurt', 'meandom', 'meanfreq', 'dfrange', 'modindx'], axis=1)
ND | code |
90104745/cell_14 | [
"text_html_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum()
data.info() | code |
90104745/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import mglearn
import pandas
import seaborn
import seaborn
import matplotlib.pyplot as plt
import pandas
df_pandas = pandas.read_csv('../input/voicegender/voice.csv')
male = df_pandas.loc[df_pandas['label'] == 'male']
female = df_pandas.loc[df_pandas['label'] == 'female']
fig, axes = plt.subplots(10, 2, figsize=(10, 20))
ax = axes.ravel()
for i in range(20):
ax[i].hist(male.iloc[:, i], bins=20, color=mglearn.cm3(0), alpha=0.5)
ax[i].hist(female.iloc[:, i], bins=20, color=mglearn.cm3(2), alpha=0.5)
ax[i].set_title(list(male)[i])
ax[i].set_yticks(())
ax[i].set_xlabel('Feature magnitude')
ax[i].set_ylabel('Frequency')
ax[i].legend(['male', 'female'], loc='best')
fig.tight_layout() | code |
90104745/cell_12 | [
"text_plain_output_1.png"
] | import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/voicegender/voice.csv')
data.isnull().sum() | code |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.