# Loan-approval model training script.
# (Exported from a Hugging Face Space file viewer; original commit 3efedb0, 2,565 bytes.
# The export's page chrome and line-number gutter were removed from this header.)
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
import pickle
import os
# --- Train a loan-approval classifier and persist the fitted pipeline. ---

# Resolve the project root relative to this file so the script works
# regardless of the current working directory.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Load the dataset.
data_path = os.path.join(project_root, 'data', 'loan_approval_dataset.csv')
print(f"Loading data from: {data_path}")
df = pd.read_csv(data_path)

# The raw CSV carries stray whitespace in both headers and string cells;
# strip it so the column/value lookups below match exactly.
df.columns = df.columns.str.strip()
for col in df.select_dtypes(include=['object']).columns:
    df[col] = df[col].str.strip()

# Feature groups (column names assumed from the dataset schema --
# verify against the CSV if it changes).
numerical_features = ['no_of_dependents', 'income_annum', 'loan_amount', 'loan_term',
                      'cibil_score', 'residential_assets_value', 'commercial_assets_value',
                      'luxury_assets_value', 'bank_asset_value']
categorical_features = ['education', 'self_employed']

# Features and binary target (Approved -> 1, Rejected -> 0).
X = df[numerical_features + categorical_features]
y = df['loan_status'].map({'Approved': 1, 'Rejected': 0})

# Preprocessing: scale numeric columns, one-hot encode categoricals.
# handle_unknown='ignore' keeps inference from crashing on categories
# that were not present at training time.
numeric_transformer = Pipeline(steps=[
    ('scaler', StandardScaler())
])
categorical_transformer = Pipeline(steps=[
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, numerical_features),
        ('cat', categorical_transformer, categorical_features),
    ])

# Full pipeline: preprocessing + random-forest classifier, so the saved
# artifact embeds the exact transforms used at training time.
model = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', RandomForestClassifier(n_estimators=100, random_state=42))
])

# Hold out 20% for evaluation; stratify on the label so both splits keep
# the same approval/rejection ratio (matters if classes are imbalanced).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

# Fit, then report accuracy on both splits (a large train/test gap
# would suggest overfitting).
model.fit(X_train, y_train)
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print(f"Train accuracy: {train_score:.3f}")
print(f"Test accuracy: {test_score:.3f}")

# Persist the fitted pipeline as a pickle.
# NOTE(security): pickle files are executable on load -- only unpickle
# artifacts produced by this project, never ones from untrusted sources.
models_dir = os.path.join(project_root, 'models')
os.makedirs(models_dir, exist_ok=True)
with open(os.path.join(models_dir, 'loan_model.pkl'), 'wb') as f:
    pickle.dump(model, f)
print("Model saved successfully as loan_model.pkl!")