Guru-25 committed on
Commit
cb29a61
·
verified ·
1 Parent(s): 8729b95
Files changed (1) hide show
  1. app.py +76 -78
app.py CHANGED
@@ -64,9 +64,6 @@ is_unconscious = False
64
  frame_count_webcam = 0
65
  stop_gaze_processing = False
66
 
67
- # --- Global State Variables for Distraction Webcam ---
68
- stop_distraction_processing = False
69
-
70
  # Constants
71
  GAZE_STABILITY_THRESHOLD = 0.5
72
  TIME_THRESHOLD = 15
@@ -202,6 +199,74 @@ def analyze_video(input_video):
202
  out.release()
203
  return temp_path
204
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
  def terminate_gaze_stream():
206
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
207
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing
@@ -220,12 +285,6 @@ def terminate_gaze_stream():
220
  frame_count_webcam = 0
221
  return "Gaze Processing Terminated. State Reset."
222
 
223
- def terminate_distraction_stream():
224
- global stop_distraction_processing
225
- print("Distraction Termination signal received. Stopping processing.")
226
- stop_distraction_processing = True
227
- return "Distraction Processing Terminated."
228
-
229
  def process_gaze_frame(frame):
230
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
231
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing
@@ -338,58 +397,6 @@ def process_gaze_frame(frame):
338
  cv2.putText(error_frame, f"Error: {e}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
339
  return error_frame
340
 
341
- def process_distraction_frame(frame):
342
- global stop_distraction_processing
343
-
344
- if stop_distraction_processing:
345
- return np.zeros((480, 640, 3), dtype=np.uint8)
346
-
347
- if frame is None:
348
- return np.zeros((480, 640, 3), dtype=np.uint8)
349
-
350
- try:
351
- frame_to_process = frame
352
- results = distraction_model(frame_to_process, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)
353
-
354
- display_text = "safe driving"
355
- alarm_action = None
356
-
357
- for result in results:
358
- if result.boxes is not None and len(result.boxes) > 0:
359
- boxes = result.boxes.xyxy.cpu().numpy()
360
- scores = result.boxes.conf.cpu().numpy()
361
- classes = result.boxes.cls.cpu().numpy()
362
-
363
- if len(boxes) > 0:
364
- max_score_idx = scores.argmax()
365
- detected_action_idx = int(classes[max_score_idx])
366
- if 0 <= detected_action_idx < len(distraction_class_names):
367
- detected_action = distraction_class_names[detected_action_idx]
368
- confidence = scores[max_score_idx]
369
- display_text = f"{detected_action}: {confidence:.2f}"
370
- if detected_action != 'safe driving':
371
- alarm_action = detected_action
372
- else:
373
- print(f"Warning: Detected class index {detected_action_idx} out of bounds.")
374
- display_text = "Unknown Detection"
375
-
376
- if alarm_action:
377
- print(f"ALARM: Unsafe behavior detected - {alarm_action}!")
378
- cv2.putText(frame, f"ALARM: {alarm_action}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
379
-
380
- text_color = (0, 255, 0) if alarm_action is None else (0, 255, 255)
381
- cv2.putText(frame, display_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
382
-
383
- return frame
384
-
385
- except Exception as e:
386
- print(f"Error processing distraction frame: {e}")
387
- error_frame = np.zeros((480, 640, 3), dtype=np.uint8)
388
- if not error_frame.flags.writeable:
389
- error_frame = error_frame.copy()
390
- cv2.putText(error_frame, f"Error: {e}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
391
- return error_frame
392
-
393
  def create_gaze_interface():
394
  with gr.Blocks() as gaze_demo:
395
  gr.Markdown("## Real-time Gaze & Drowsiness Tracking")
@@ -409,21 +416,13 @@ def create_gaze_interface():
409
  return gaze_demo
410
 
411
  def create_distraction_interface():
412
- with gr.Blocks() as distraction_demo:
413
- gr.Markdown("## Real-time Distraction Detection")
414
- with gr.Row():
415
- webcam_stream = WebRTC(label="Webcam Stream")
416
- with gr.Row():
417
- terminate_btn = gr.Button("Terminate Process")
418
-
419
- webcam_stream.stream(
420
- fn=process_distraction_frame,
421
- inputs=[webcam_stream],
422
- outputs=[webcam_stream]
423
- )
424
-
425
- terminate_btn.click(fn=terminate_distraction_stream, inputs=None, outputs=None)
426
-
427
  return distraction_demo
428
 
429
  def create_video_interface():
@@ -438,7 +437,7 @@ def create_video_interface():
438
 
439
  demo = gr.TabbedInterface(
440
  [create_video_interface(), create_gaze_interface(), create_distraction_interface()],
441
- ["Video Upload", "Gaze & Drowsiness", "Distraction Detection"],
442
  title="Driver Monitoring System"
443
  )
444
 
@@ -454,5 +453,4 @@ if __name__ == "__main__":
454
  is_unconscious = False
455
  frame_count_webcam = 0
456
  stop_gaze_processing = False
457
- stop_distraction_processing = False
458
  demo.launch()
 
64
  frame_count_webcam = 0
65
  stop_gaze_processing = False
66
 
 
 
 
67
  # Constants
68
  GAZE_STABILITY_THRESHOLD = 0.5
69
  TIME_THRESHOLD = 15
 
199
  out.release()
200
  return temp_path
201
 
202
def analyze_distraction_video(input_video):
    """Run the distraction-detection model over every frame of a video file.

    Each frame gets the top detection drawn on it ("<action>: <conf>"); any
    action other than 'safe driving' additionally gets a red ALARM overlay.
    Annotated frames are written to a temporary .mp4.

    Args:
        input_video: Path to the input video file (upload or webcam recording).

    Returns:
        Path to the annotated temporary .mp4, or None if the video could not
        be opened or contained no decodable frames.
    """
    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        print("Error: Could not open video file.")
        return None

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    temp_fd, temp_path = tempfile.mkstemp(suffix='.mp4')
    os.close(temp_fd)  # VideoWriter opens the path itself; we only need the name
    out = None  # created lazily once the first frame reveals the output size

    # CAP_PROP_FPS can be 0 for some containers; fall back to 30 fps.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        try:
            results = distraction_model(frame, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)

            display_text = "safe driving"
            alarm_action = None

            for result in results:
                if result.boxes is not None and len(result.boxes) > 0:
                    boxes = result.boxes.xyxy.cpu().numpy()
                    scores = result.boxes.conf.cpu().numpy()
                    classes = result.boxes.cls.cpu().numpy()

                    if len(boxes) > 0:
                        # Keep only the single highest-confidence detection.
                        max_score_idx = scores.argmax()
                        detected_action_idx = int(classes[max_score_idx])
                        if 0 <= detected_action_idx < len(distraction_class_names):
                            detected_action = distraction_class_names[detected_action_idx]
                            confidence = scores[max_score_idx]
                            display_text = f"{detected_action}: {confidence:.2f}"
                            if detected_action != 'safe driving':
                                alarm_action = detected_action
                        else:
                            print(f"Warning: Detected class index {detected_action_idx} out of bounds.")
                            display_text = "Unknown Detection"

            if alarm_action:
                print(f"ALARM: Unsafe behavior detected - {alarm_action}!")
                cv2.putText(frame, f"ALARM: {alarm_action}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            text_color = (0, 255, 0) if alarm_action is None else (0, 255, 255)
            cv2.putText(frame, display_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)

            if out is None:
                h, w = frame.shape[:2]
                out = cv2.VideoWriter(temp_path, fourcc, fps, (w, h))
            out.write(frame)

        except Exception as e:
            # Best-effort: stamp the error onto the frame and keep going so
            # one bad frame doesn't abort the whole video.
            print(f"Error processing distraction frame in video: {e}")
            if out is None:
                h, w = frame.shape[:2]
                out = cv2.VideoWriter(temp_path, fourcc, fps, (w, h))
            cv2.putText(frame, f"Error: {e}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
            out.write(frame)

    cap.release()
    if out is None:
        # No frames were ever decoded: don't return a path to an empty,
        # invalid .mp4, and don't leak the temp file created above.
        try:
            os.remove(temp_path)
        except OSError:
            pass
        return None
    out.release()
    return temp_path
270
  def terminate_gaze_stream():
271
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
272
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing
 
285
  frame_count_webcam = 0
286
  return "Gaze Processing Terminated. State Reset."
287
 
 
 
 
 
 
 
288
  def process_gaze_frame(frame):
289
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
290
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing
 
397
  cv2.putText(error_frame, f"Error: {e}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
398
  return error_frame
399
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400
  def create_gaze_interface():
401
  with gr.Blocks() as gaze_demo:
402
  gr.Markdown("## Real-time Gaze & Drowsiness Tracking")
 
416
  return gaze_demo
417
 
418
def create_distraction_interface():
    # Video-upload analysis tab: runs an uploaded or webcam-recorded clip
    # through analyze_distraction_video and displays the annotated result.
    return gr.Interface(
        fn=analyze_distraction_video,
        inputs=gr.Video(sources=["upload", "webcam"], label="Input Video (Upload or Record)"),
        outputs=gr.Video(label="Processed Video"),
        title="Distraction Detection Analysis",
        description="Upload or record a video to analyze driver distraction.",
    )
427
 
428
  def create_video_interface():
 
437
 
438
  demo = gr.TabbedInterface(
439
  [create_video_interface(), create_gaze_interface(), create_distraction_interface()],
440
+ ["Gaze Video Upload", "Gaze & Drowsiness (Live)", "Distraction Video Upload"],
441
  title="Driver Monitoring System"
442
  )
443
 
 
453
  is_unconscious = False
454
  frame_count_webcam = 0
455
  stop_gaze_processing = False
 
456
  demo.launch()