Alex Hortua committed
Commit ebf4f46 · 1 Parent(s): e2855de

Adding new information
notebooks/training_visualization.ipynb CHANGED
@@ -10,9 +10,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "ename": "FileNotFoundError",
+     "evalue": "[Errno 2] No such file or directory: 'config.yaml'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[1], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mjson\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01myaml\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mconfig.yaml\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f: \n\u001b[1;32m 6\u001b[0m config \u001b[38;5;241m=\u001b[39m yaml\u001b[38;5;241m.\u001b[39msafe_load(f)\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# Load training log file\u001b[39;00m\n",
+      "File \u001b[0;32m~/studies/northEastern/Computer Vision/Assigments/objectlocalization/venv/lib/python3.12/site-packages/IPython/core/interactiveshell.py:324\u001b[0m, in \u001b[0;36m_modified_open\u001b[0;34m(file, *args, **kwargs)\u001b[0m\n\u001b[1;32m 317\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m {\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m}:\n\u001b[1;32m 318\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 319\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mIPython won\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt let you open fd=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfile\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m by default \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 320\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mas it is likely to crash IPython. If you know what you are doing, \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myou can use builtins\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m open.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 322\u001b[0m )\n\u001b[0;32m--> 324\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mio_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'config.yaml'"
+     ]
+    }
+   ],
    "source": [
     "import matplotlib.pyplot as plt\n",
     "import json\n",
@@ -67,7 +80,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8"
+   "version": "3.12.2"
   }
  },
  "nbformat": 4,
src/traina.py DELETED
@@ -1,100 +0,0 @@
-import os
-import torch
-import torchvision
-import json
-import yaml
-from tqdm import tqdm
-from torch.utils.data import DataLoader, random_split
-from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
-from torchvision.transforms import functional as F
-from src.dataset import LegoDataset
-from src.evaluate import calculate_map
-
-# Load Configuration
-with open("config.yaml", "r") as f:
-    config = yaml.safe_load(f)
-
-# Load Pretrained Model
-model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")
-
-# Freeze Backbone
-for param in model.backbone.parameters():
-    param.requires_grad = False
-
-# Modify Predictor
-num_classes = config["model"]["num_classes"]
-in_features = model.roi_heads.box_predictor.cls_score.in_features
-model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
-
-# Training Setup
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
-
-optimizer = torch.optim.Adam(model.parameters(), lr=config["model"]["learning_rate"])
-
-# Load Dataset
-image_dir = config["dataset"]["image_dir"]
-annotation_dir = config["dataset"]["annotation_dir"]
-dataset = LegoDataset(image_dir, annotation_dir)
-train_size = int(config["dataset"]["train_split"] * len(dataset))
-val_size = len(dataset) - train_size
-train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
-train_loader = DataLoader(train_dataset, batch_size=config["model"]["batch_size"], shuffle=True, collate_fn=lambda x: tuple(zip(*x)))
-val_loader = DataLoader(val_dataset, batch_size=config["model"]["batch_size"], shuffle=False, collate_fn=lambda x: tuple(zip(*x)))
-
-# Logging Function (Writes logs efficiently to file)
-log_file = "models/training_log.txt"
-def log_message(message):
-    with open(log_file, "a") as f:
-        f.write(message + "\n")
-
-# Training Function with tqdm progress bar
-def train_one_epoch(model, optimizer, data_loader, device):
-    model.train()
-    running_loss = 0.0
-
-    for batch_idx, (images, targets) in enumerate(tqdm(data_loader, desc="Training Progress")):
-        images = [F.to_tensor(img).to(device) for img in images]
-        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
-
-        loss_dict = model(images, targets)
-        loss = sum(loss for loss in loss_dict.values())
-
-        optimizer.zero_grad()
-        loss.backward()
-        optimizer.step()
-
-        running_loss += loss.item()
-
-    return running_loss / len(data_loader)
-
-# Train Model with Logging
-num_epochs = config["model"]["epochs"]
-os.makedirs("models", exist_ok=True)  # Ensure model directory exists
-
-for epoch in range(num_epochs):
-    log_message(f"Starting Epoch {epoch+1}/{num_epochs}")
-    loss = train_one_epoch(model, optimizer, train_loader, device)
-    log_message(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss:.4f}")
-
-    # Evaluate mAP after each epoch
-    mAP = calculate_map(model, val_loader, device)
-    log_message(f"Validation mAP: {mAP:.4f}")
-
-    # Save log in JSON for visualization
-    log_json = "models/training_log.json"
-    if not os.path.exists(log_json):
-        log_data = {"loss": [], "mAP": []}
-    else:
-        with open(log_json, "r") as f:
-            log_data = json.load(f)
-
-    log_data["loss"].append(loss)
-    log_data["mAP"].append(mAP)
-
-    with open(log_json, "w") as f:
-        json.dump(log_data, f, indent=4)
-
-# Save the trained model
-torch.save(model.state_dict(), "models/lego_fasterrcnn.pth")
-log_message("Model saved as models/lego_fasterrcnn.pth")