import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset

# Load the training data: each row holds a label plus 784 flattened pixel
# values, reshaped here to (N, 1, 28, 28) images and scaled to [0, 1].
train_df = pd.read_csv("./input/train.csv")

X = train_df.drop("label", axis=1).values.reshape(-1, 1, 28, 28).astype("float32")
y = train_df["label"].values
X /= 255.0
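
# Quick sanity check: X is assumed to come out as (N, 1, 28, 28) float32 with
# values in [0.0, 1.0]; printing makes a bad reshape obvious early.
print(X.shape, X.min(), X.max())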

# Hold out 20% for validation; stratifying keeps the class proportions of the
# two splits aligned.
X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)
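
# With stratify=y the label counts should be near-proportional across the two
# splits; a quick bincount makes that visible.
print(np.bincount(y_train), np.bincount(y_val))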

# Wrap the arrays as tensors; class labels must be int64 for the loss.
X_train_tensor = torch.tensor(X_train)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
X_val_tensor = torch.tensor(X_val)
y_val_tensor = torch.tensor(y_val, dtype=torch.long)

# Batch the data; only the training set needs shuffling.
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
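
# Sanity-check one batch: images should be (64, 1, 28, 28) float32 and labels
# (64,) int64 (the last batch of an epoch may be smaller).
images, labels = next(iter(train_loader))
print(images.shape, images.dtype, labels.shape, labels.dtype)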


class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        # Two conv+pool stages shrink a 28x28 input to 5x5:
        # 28 -> conv3x3 -> 26 -> pool/2 -> 13 -> conv3x3 -> 11 -> pool/2 -> 5.
        self.fc1 = nn.Linear(64 * 5 * 5, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        # Return raw logits: nn.CrossEntropyLoss applies log_softmax itself,
        # so calling F.log_softmax here as well would apply it twice.
        return self.fc2(x)


model = Net()
criterion = nn.CrossEntropyLoss()  # expects raw logits and int64 targets
optimizer = optim.Adam(model.parameters())  # default learning rate (1e-3)
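
# A dummy forward pass cheaply confirms the hard-coded fc1 input size
# (64 * 5 * 5) matches the conv stack; a mismatch raises here instead of
# mid-training.
with torch.no_grad():
    print(model(torch.zeros(1, 1, 28, 28)).shape)  # expected: torch.Size([1, 10])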

# Train for a few epochs, tracking the mean batch loss per epoch.
num_epochs = 5
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for data, target in train_loader:
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f"Epoch {epoch + 1}/{num_epochs}, loss: {running_loss / len(train_loader):.4f}")

# Evaluate on the held-out set with gradients disabled.
model.eval()
correct = 0
with torch.no_grad():
    for data, target in val_loader:
        output = model(data)
        # argmax over the class dimension; softmax is monotonic, so raw
        # logits give the same prediction.
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()

accuracy = correct / len(val_loader.dataset)
print(f"Validation Accuracy: {accuracy:.4f}")