Guru-25 committed on
Commit
ffaa468
·
verified ·
1 Parent(s): d343b30
Files changed (1) hide show
  1. app.py +17 -10
app.py CHANGED
@@ -13,6 +13,9 @@ import torch
13
  import json
14
  import requests
15
 
 
 
 
16
  def smooth_values(history, current_value, window_size=5):
17
  if current_value is not None:
18
  if isinstance(current_value, np.ndarray):
@@ -101,7 +104,7 @@ EYE_CLOSURE_THRESHOLD = 10
101
  HEAD_STABILITY_THRESHOLD = 0.05
102
  DISTRACTION_CONF_THRESHOLD = 0.1
103
 
104
- @spaces.GPU(duration=30) # Set duration to 30 seconds for real-time processing
105
  def analyze_video(input_video):
106
  cap = cv2.VideoCapture(input_video)
107
  local_gaze_predictor = GazePredictor(GAZE_MODEL_PATH)
@@ -229,7 +232,7 @@ def analyze_video(input_video):
229
  out.release()
230
  return temp_path
231
 
232
- @spaces.GPU(duration=30) # Set duration to 30 seconds for real-time processing
233
  def analyze_distraction_video(input_video):
234
  cap = cv2.VideoCapture(input_video)
235
  if not cap.isOpened():
@@ -243,8 +246,10 @@ def analyze_distraction_video(input_video):
243
 
244
  fps = cap.get(cv2.CAP_PROP_FPS) or 30
245
 
246
- local_distraction_model = YOLO(DISTRACTION_MODEL_PATH)
247
- local_distraction_model.to('cpu')
 
 
248
 
249
  while True:
250
  ret, frame = cap.read()
@@ -252,7 +257,7 @@ def analyze_distraction_video(input_video):
252
  break
253
 
254
  try:
255
- results = local_distraction_model(frame, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)
256
 
257
  display_text = "safe driving"
258
  alarm_action = None
@@ -301,9 +306,10 @@ def analyze_distraction_video(input_video):
301
  out.release()
302
  return temp_path
303
 
304
- @spaces.GPU(duration=30) # Set duration to 30 seconds for real-time processing
305
  def process_distraction_frame(frame):
306
  global stop_distraction_processing
 
307
 
308
  if stop_distraction_processing:
309
  return np.zeros((480, 640, 3), dtype=np.uint8)
@@ -311,12 +317,13 @@ def process_distraction_frame(frame):
311
  if frame is None:
312
  return np.zeros((480, 640, 3), dtype=np.uint8)
313
 
314
- local_distraction_model = YOLO(DISTRACTION_MODEL_PATH)
315
- local_distraction_model.to('cpu')
 
316
 
317
  try:
318
  # Run distraction detection model
319
- results = local_distraction_model(frame, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)
320
 
321
  display_text = "safe driving"
322
  alarm_action = None
@@ -396,7 +403,7 @@ def terminate_distraction_stream():
396
  stop_distraction_processing = True
397
  return "Distraction Processing Terminated."
398
 
399
- @spaces.GPU(duration=30) # Set duration to 30 seconds for real-time processing
400
  def process_gaze_frame(frame):
401
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
402
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing
 
13
  import json
14
  import requests
15
 
16
+ # --- Model cache variables ---
17
+ distraction_model_cache = None
18
+
19
  def smooth_values(history, current_value, window_size=5):
20
  if current_value is not None:
21
  if isinstance(current_value, np.ndarray):
 
104
  HEAD_STABILITY_THRESHOLD = 0.05
105
  DISTRACTION_CONF_THRESHOLD = 0.1
106
 
107
+ @spaces.GPU(duration=60) # Extended duration to 60 seconds for longer streaming
108
  def analyze_video(input_video):
109
  cap = cv2.VideoCapture(input_video)
110
  local_gaze_predictor = GazePredictor(GAZE_MODEL_PATH)
 
232
  out.release()
233
  return temp_path
234
 
235
+ @spaces.GPU(duration=60) # Extended duration to 60 seconds for longer streaming
236
  def analyze_distraction_video(input_video):
237
  cap = cv2.VideoCapture(input_video)
238
  if not cap.isOpened():
 
246
 
247
  fps = cap.get(cv2.CAP_PROP_FPS) or 30
248
 
249
+ global distraction_model_cache
250
+ if distraction_model_cache is None:
251
+ distraction_model_cache = YOLO(DISTRACTION_MODEL_PATH)
252
+ distraction_model_cache.to('cpu')
253
 
254
  while True:
255
  ret, frame = cap.read()
 
257
  break
258
 
259
  try:
260
+ results = distraction_model_cache(frame, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)
261
 
262
  display_text = "safe driving"
263
  alarm_action = None
 
306
  out.release()
307
  return temp_path
308
 
309
+ @spaces.GPU(duration=60) # Extended duration to 60 seconds for longer streaming
310
  def process_distraction_frame(frame):
311
  global stop_distraction_processing
312
+ global distraction_model_cache
313
 
314
  if stop_distraction_processing:
315
  return np.zeros((480, 640, 3), dtype=np.uint8)
 
317
  if frame is None:
318
  return np.zeros((480, 640, 3), dtype=np.uint8)
319
 
320
+ if distraction_model_cache is None:
321
+ distraction_model_cache = YOLO(DISTRACTION_MODEL_PATH)
322
+ distraction_model_cache.to('cpu')
323
 
324
  try:
325
  # Run distraction detection model
326
+ results = distraction_model_cache(frame, conf=DISTRACTION_CONF_THRESHOLD, verbose=False)
327
 
328
  display_text = "safe driving"
329
  alarm_action = None
 
403
  stop_distraction_processing = True
404
  return "Distraction Processing Terminated."
405
 
406
+ @spaces.GPU(duration=60) # Extended duration to 60 seconds for longer streaming
407
  def process_gaze_frame(frame):
408
  global gaze_history, head_history, ear_history, stable_gaze_time, stable_head_time
409
  global eye_closed_time, blink_count, start_time, is_unconscious, frame_count_webcam, stop_gaze_processing