Aumkeshchy2003 committed
Commit 3006b90 · verified · 1 Parent(s): 28c7524

Update app.py

Files changed (1)
  1. app.py +93 -77
app.py CHANGED
@@ -14,18 +14,19 @@ os.makedirs("models", exist_ok=True)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"Using device: {device}")
 
-model_path = Path("models/yolov5x.pt")
+# Use YOLOv5n (nano) for higher FPS
+model_path = Path("models/yolov5n.pt")
 if model_path.exists():
     print(f"Loading model from cache: {model_path}")
-    model = torch.hub.load("ultralytics/yolov5", "yolov5x", pretrained=True, source="local", path=str(model_path)).to(device)
+    model = torch.hub.load("ultralytics/yolov5", "yolov5n", pretrained=True, source="local", path=str(model_path)).to(device)
 else:
-    print("Downloading YOLOv5x model and caching...")
-    model = torch.hub.load("ultralytics/yolov5", "yolov5x", pretrained=True).to(device)
+    print("Downloading YOLOv5n model and caching...")
+    model = torch.hub.load("ultralytics/yolov5", "yolov5n", pretrained=True).to(device)
     torch.save(model.state_dict(), model_path)
 
 # Model configurations for better performance
-model.conf = 0.5  # Slightly lower confidence threshold for real-time
-model.iou = 0.45  # Slightly lower IOU threshold for real-time
+model.conf = 0.5  # Confidence threshold
+model.iou = 0.45  # IOU threshold
 model.classes = None  # Detect all classes
 model.max_det = 20  # Limit detections for speed
 
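
The hunk above trades YOLOv5x for the much smaller YOLOv5n to raise throughput. One quick way to sanity-check that trade-off is to time a few warm inferences of each variant on a blank frame. The sketch below is illustrative only and is not part of this commit: avg_latency is a hypothetical helper, the 384-pixel input size simply mirrors the size used later in the commit, and the hub calls are left commented out because they download weights.

# Rough latency probe for a torch.hub YOLOv5 variant (illustrative sketch only).
import time
import numpy as np
import torch

def avg_latency(model, runs: int = 20, size: int = 384) -> float:
    """Average seconds per inference on a dummy 640x480 frame."""
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    with torch.no_grad():
        model(img, size=size)              # warm-up pass
        start = time.time()
        for _ in range(runs):
            model(img, size=size)
    return (time.time() - start) / runs

# nano = torch.hub.load("ultralytics/yolov5", "yolov5n", pretrained=True)
# xlarge = torch.hub.load("ultralytics/yolov5", "yolov5x", pretrained=True)
# print(f"yolov5n: {avg_latency(nano):.3f}s  yolov5x: {avg_latency(xlarge):.3f}s")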
@@ -44,6 +45,8 @@ colors = np.random.uniform(0, 255, size=(len(model.names), 3))
 total_inference_time = 0
 inference_count = 0
 fps_queue = Queue(maxsize=30)  # Store last 30 FPS values for smoothing
+for _ in range(30):  # Initialize with reasonable values
+    fps_queue.put(30.0)
 
 # Threading variables
 processing_lock = threading.Lock()
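
The hunk above pre-fills fps_queue with 30.0 so the on-screen average starts from a sensible value, and the processing thread later swaps out the oldest sample by hand once the queue is full. A collections.deque with maxlen gives the same rolling window with less bookkeeping; the sketch below is an alternative pattern, not the code this commit uses, and fps_window / record_frame are illustrative names.

# Rolling FPS average over the last 30 samples (sketch; deque drops old samples itself).
import time
from collections import deque

fps_window = deque([30.0] * 30, maxlen=30)   # seeded like the commit's fps_queue

def record_frame(start_time: float) -> float:
    """Push one frame's FPS sample and return the smoothed average."""
    elapsed = time.time() - start_time
    fps_window.append(1.0 / elapsed if elapsed > 0 else 30.0)
    return sum(fps_window) / len(fps_window)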
@@ -107,58 +110,73 @@ def detect_objects(image):
 def process_frame_thread():
     """Background thread for processing frames"""
     while not stop_event.is_set():
-        if not frame_queue.empty():
-            frame = frame_queue.get()
-
-            # Skip if there's a processing lock (from image upload)
-            if processing_lock.locked():
-                result_queue.put(frame)  # Return unprocessed frame
-                continue
-
-            # Process the frame
-            with torch.no_grad():  # Ensure no gradients for inference
-                input_size = 384  # Smaller size for real-time processing
-                results = model(frame, size=input_size)
-
-            # Calculate FPS
-            inference_time = time.time() - frame.get('timestamp', time.time())
-            current_fps = 1 / inference_time if inference_time > 0 else 30
-
-            # Update rolling FPS average
-            fps_queue.put(current_fps)
-            avg_fps = sum(list(fps_queue.queue)) / fps_queue.qsize()
-
-            # Draw detections
-            output = frame['image'].copy()
-            detections = results.pred[0].cpu().numpy()
-
-            for *xyxy, conf, cls in detections:
-                x1, y1, x2, y2 = map(int, xyxy)
-                class_id = int(cls)
-                color = colors[class_id].tolist()
-
-                # Draw rectangle and label
-                cv2.rectangle(output, (x1, y1), (x2, y2), color, 2, lineType=cv2.LINE_AA)
-
-                label = f"{model.names[class_id]} {conf:.2f}"
-                font_scale, font_thickness = 0.6, 1  # Smaller for real-time
-                (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
-
-                cv2.rectangle(output, (x1, y1 - h - 5), (x1 + w + 5, y1), color, -1)
-                cv2.putText(output, label, (x1 + 3, y1 - 3),
-                            cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), font_thickness, lineType=cv2.LINE_AA)
-
-            # Add FPS counter
-            cv2.rectangle(output, (10, 10), (210, 80), (0, 0, 0), -1)
-            cv2.putText(output, f"FPS: {current_fps:.1f}", (20, 40),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, lineType=cv2.LINE_AA)
-            cv2.putText(output, f"Avg FPS: {avg_fps:.1f}", (20, 70),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, lineType=cv2.LINE_AA)
-
-            # Put the processed frame in the result queue
-            result_queue.put({'image': output, 'fps': current_fps})
-        else:
-            time.sleep(0.001)  # Small sleep to prevent CPU spinning
+        try:
+            if not frame_queue.empty():
+                frame = frame_queue.get()
+
+                # Skip if there's a processing lock (from image upload)
+                if processing_lock.locked():
+                    result_queue.put(frame)  # Return unprocessed frame
+                    continue
+
+                # Process the frame
+                start_time = time.time()
+                with torch.no_grad():  # Ensure no gradients for inference
+                    input_size = 384  # Smaller size for real-time processing
+                    results = model(frame['image'], size=input_size)
+
+                # Calculate FPS
+                inference_time = time.time() - start_time
+                current_fps = 1 / inference_time if inference_time > 0 else 30
+
+                # Update rolling FPS average
+                if not fps_queue.full():
+                    fps_queue.put(current_fps)
+                else:
+                    try:
+                        fps_queue.get_nowait()
+                        fps_queue.put(current_fps)
+                    except:
+                        pass
+
+                fps_values = list(fps_queue.queue)
+                avg_fps = sum(fps_values) / len(fps_values) if fps_values else 30.0
+
+                # Draw detections
+                output = frame['image'].copy()
+                detections = results.pred[0].cpu().numpy()
+
+                for *xyxy, conf, cls in detections:
+                    x1, y1, x2, y2 = map(int, xyxy)
+                    class_id = int(cls)
+                    color = colors[class_id].tolist()
+
+                    # Draw rectangle and label
+                    cv2.rectangle(output, (x1, y1), (x2, y2), color, 2, lineType=cv2.LINE_AA)
+
+                    label = f"{model.names[class_id]} {conf:.2f}"
+                    font_scale, font_thickness = 0.6, 1  # Smaller for real-time
+                    (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
+
+                    cv2.rectangle(output, (x1, y1 - h - 5), (x1 + w + 5, y1), color, -1)
+                    cv2.putText(output, label, (x1 + 3, y1 - 3),
+                                cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), font_thickness, lineType=cv2.LINE_AA)
+
+                # Add FPS counter
+                cv2.rectangle(output, (10, 10), (210, 80), (0, 0, 0), -1)
+                cv2.putText(output, f"FPS: {current_fps:.1f}", (20, 40),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, lineType=cv2.LINE_AA)
+                cv2.putText(output, f"Avg FPS: {avg_fps:.1f}", (20, 70),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, lineType=cv2.LINE_AA)
+
+                # Put the processed frame in the result queue
+                if not result_queue.full():
+                    result_queue.put({'image': output, 'fps': current_fps})
+            else:
+                time.sleep(0.001)  # Small sleep to prevent CPU spinning
+        except Exception as e:
+            print(f"Error in frame processing thread: {e}")
+            time.sleep(0.1)  # Pause briefly on error
 
 def webcam_feed():
     """Generator function for webcam feed"""
@@ -170,20 +188,33 @@ def webcam_feed():
 
     # Open webcam
     cap = cv2.VideoCapture(0)
+    if not cap.isOpened():
+        print("Warning: Unable to open webcam! Using dummy frames instead.")
+        # Create a dummy frame with a message
+        dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
+        cv2.putText(dummy_frame, "Webcam not available", (100, 240),
+                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+        while True:
+            yield dummy_frame
+            time.sleep(0.033)  # ~30 FPS
+
+    # Set webcam properties for best performance
     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
+    cap.set(cv2.CAP_PROP_FPS, 30)  # Request 30 FPS from camera if supported
 
     try:
-        while True:
+        while cap.isOpened():
             success, frame = cap.read()
             if not success:
+                print("Failed to read from webcam")
                 break
 
-            # Put frame in queue for processing
+            # Put frame in queue for processing if not full
             if not frame_queue.full():
                 frame_queue.put({'image': frame, 'timestamp': time.time()})
 
-            # Get processed frame from result queue
+            # Get processed frame from result queue if available
             if not result_queue.empty():
                 result = result_queue.get()
                 yield result['image']
@@ -191,8 +222,8 @@ def webcam_feed():
                 # If no processed frame is available, yield the raw frame
                 yield frame
 
-            # Control frame rate to not overwhelm the system
-            time.sleep(0.01)
+            # Control frame rate
+            time.sleep(0.01)  # Small delay to prevent overwhelming the system
     finally:
         cap.release()
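
The capture loop above throttles itself with a fixed time.sleep(0.01), so the iteration rate drifts with however long each read and queue operation takes. If a steadier cadence matters, pacing against a target frame period is a small change; the paced generator below is a hypothetical helper sketched for illustration, not something this commit adds.

# Pace an infinite loop at roughly target_fps iterations per second (hypothetical helper).
import time

def paced(target_fps: float = 30.0):
    period = 1.0 / target_fps
    next_tick = time.time()
    while True:
        yield
        next_tick += period
        delay = next_tick - time.time()
        if delay > 0:
            time.sleep(delay)
        else:
            next_tick = time.time()   # fell behind; reset instead of bursting

# for _ in paced(30.0):
#     success, frame = cap.read()
#     ...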
@@ -245,24 +276,8 @@ with gr.Blocks(title="YOLOv5 Object Detection - Real-time & Image Upload") as demo:
     submit_button.click(fn=process_uploaded_image, inputs=input_image, outputs=output_image)
     clear_button.click(lambda: (None, None), None, [input_image, output_image])
 
-    # Connect webcam feed
-    demo.load(lambda: None, None, webcam_output, _js="""
-        () => {
-            // Keep the webcam tab refreshing at high frequency
-            setInterval(() => {
-                if (document.querySelector('.tabitem:first-child').style.display !== 'none') {
-                    const webcamImg = document.querySelector('.tabitem:first-child img');
-                    if (webcamImg) {
-                        const src = webcamImg.src;
-                        webcamImg.src = src.includes('?') ? src.split('?')[0] + '?t=' + Date.now() : src + '?t=' + Date.now();
-                    }
-                }
-            }, 33);  // ~30 FPS refresh rate
-            return [];
-        }
-    """)
-
     # Start webcam feed
+    demo.load(fn=lambda: None, inputs=None, outputs=webcam_output)
     webcam_output.update(webcam_feed)
 
 # Cleanup function to stop threads when app closes
@@ -270,5 +285,6 @@ def cleanup():
     stop_event.set()
     print("Cleaning up threads...")
 
+# Register cleanup handler
 demo.close = cleanup
-demo.launch()
+demo.launch(share=False)
 