|
import pandas as pd |
|
import numpy as np |
|
from lightgbm import LGBMRegressor |
|
from sklearn.model_selection import KFold |
|
from sklearn.metrics import mean_absolute_error |
|
|
|
|
|
# Load the competition data. Both files are expected to share the same
# feature columns; train additionally carries "target" (see usage below).
train_data = pd.read_csv("./input/train.csv")

test_data = pd.read_csv("./input/test.csv")
|
|
|
|
|
# Median-impute missing values in every numeric column.
numeric_columns_train = train_data.select_dtypes(include=[np.number]).columns
train_medians = train_data[numeric_columns_train].median()
train_data[numeric_columns_train] = train_data[numeric_columns_train].fillna(
    train_medians
)

# Impute the test set with statistics computed on the TRAIN set, so both
# splits share identical fill values. The original version used the test
# set's own medians, which skews the test distribution relative to what
# the model was trained on (a mild form of leakage/inconsistency).
numeric_columns_test = test_data.select_dtypes(include=[np.number]).columns
test_data[numeric_columns_test] = test_data[numeric_columns_test].fillna(
    train_medians.reindex(numeric_columns_test)
)
|
|
|
|
|
# Separate the regression target from the modeling features; "row_id" is
# an identifier, not a predictor, so it is excluded as well.
y = train_data["target"]

X = train_data.drop(columns=["row_id", "target"])
|
|
|
|
|
# Gradient-boosted tree regressor (LightGBM defaults).
model = LGBMRegressor()

# 10-fold shuffled cross-validation; fixed seed keeps folds reproducible.
kf = KFold(n_splits=10, shuffle=True, random_state=42)

mae_scores = []

for train_index, val_index in kf.split(X):
    X_train, X_val = X.iloc[train_index], X.iloc[val_index]
    y_train, y_val = y.iloc[train_index], y.iloc[val_index]

    model.fit(X_train, y_train)

    y_pred = model.predict(X_val)

    mae_scores.append(mean_absolute_error(y_val, y_pred))

print(f"Average MAE: {np.mean(mae_scores)}")

# Refit on the FULL training set before predicting on test. Without this,
# the model used downstream is trained only on the last CV fold (~90% of
# the data), wasting training signal.
model.fit(X, y)
|
|
|
|
|
# Score the test rows (drop the identifier so the feature set matches the
# columns the model was trained on) and attach the predictions.
test_features = test_data.drop(columns=["row_id"])
test_data["target"] = model.predict(test_features)

# Persist the two submission columns without the pandas index.
submission = test_data.loc[:, ["row_id", "target"]]
submission.to_csv("./working/submission.csv", index=False)
|
|