Aumkeshchy2003 committed
Commit a35c6a5 · verified · 1 Parent(s): 11dab6e

Update app.py

Files changed (1)
  1. app.py +69 -50
app.py CHANGED
@@ -40,61 +40,79 @@ np.random.seed(42)
 colors = np.random.randint(0, 255, size=(len(model.names), 3), dtype=np.uint8)
 
 def process_video(video_path):
-    # Fix: Handle different types of input from Gradio
-    if isinstance(video_path, dict) and 'name' in video_path:
-        video_path = video_path['name']
-
-    cap = cv2.VideoCapture(video_path)
-
-    if not cap.isOpened():
-        return "Error: Could not open video file."
-
-    frame_width = int(cap.get(3))
-    frame_height = int(cap.get(4))
-    fps = cap.get(cv2.CAP_PROP_FPS)
-
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    output_path = "output_video.mp4"
-    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
-
-    total_frames = 0
-    total_time = 0
-
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-
-        start_time = time.time()
-
-        # Convert frame for YOLOv5
-        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        results = model(img, size=640)
-
-        inference_time = time.time() - start_time
-        total_time += inference_time
-        total_frames += 1
-
-        detections = results.pred[0].cpu().numpy()
-
-        for *xyxy, conf, cls in detections:
-            x1, y1, x2, y2 = map(int, xyxy)
-            class_id = int(cls)
-            color = colors[class_id].tolist()
-            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3, lineType=cv2.LINE_AA)
-            label = f"{model.names[class_id]} {conf:.2f}"
-            cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
-
-        # Calculate FPS
-        avg_fps = total_frames / total_time if total_time > 0 else 0
-        cv2.putText(frame, f"FPS: {avg_fps:.2f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-        out.write(frame)
-
-    cap.release()
-    out.release()
-
-    return output_path
+    # Ensure we have a valid path
+    if video_path is None:
+        return None
+
+    try:
+        # For newer Gradio versions, video might be returned as a tuple
+        if isinstance(video_path, tuple) and len(video_path) >= 1:
+            video_path = video_path[0]
+        # Or a dict with a 'name' key
+        elif isinstance(video_path, dict) and 'name' in video_path:
+            video_path = video_path['name']
+        # Make sure it's a string
+        video_path = str(video_path)
+
+        cap = cv2.VideoCapture(video_path)
+
+        if not cap.isOpened():
+            print(f"Error: Could not open video file at {video_path}")
+            return None
+
+        frame_width = int(cap.get(3))
+        frame_height = int(cap.get(4))
+        fps = cap.get(cv2.CAP_PROP_FPS)
+
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        output_path = "output_video.mp4"
+        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
+
+        total_frames = 0
+        total_time = 0
+
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            start_time = time.time()
+
+            # Convert frame for YOLOv5
+            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            results = model(img, size=640)
+
+            inference_time = time.time() - start_time
+            total_time += inference_time
+            total_frames += 1
+
+            detections = results.pred[0].cpu().numpy()
+
+            for *xyxy, conf, cls in detections:
+                x1, y1, x2, y2 = map(int, xyxy)
+                class_id = int(cls)
+                color = colors[class_id].tolist()
+                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3, lineType=cv2.LINE_AA)
+                label = f"{model.names[class_id]} {conf:.2f}"
+                cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
+
+            # Calculate FPS
+            avg_fps = total_frames / total_time if total_time > 0 else 0
+            cv2.putText(frame, f"FPS: {avg_fps:.2f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+            out.write(frame)
+
+        cap.release()
+        out.release()
+
+        print(f"Video processed successfully, output at: {output_path}")
+        return output_path
+
+    except Exception as e:
+        print(f"Error processing video: {str(e)}")
+        import traceback
+        traceback.print_exc()
+        return None
 
 def process_image(image):
     img = np.array(image)
@@ -164,10 +182,11 @@ with gr.Blocks(css=css, title="Video & Image Object Detection by YOLOv5") as dem
     with gr.Tabs():
         with gr.TabItem("Video Detection", elem_classes="tab-item"):
             with gr.Row():
-                # Fix: Changed from gr.Video to gr.File to ensure proper upload handling
-                video_input = gr.File(
-                    label="Upload Video File",
-                    file_types=["video"],
+                # Keep using gr.Video but with source="upload" parameter
+                video_input = gr.Video(
+                    label="Upload Video",
+                    interactive=True,
+                    source="upload",  # Explicitly set upload as source
                     elem_id="video-input"
                 )
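
The rest of app.py is not part of this diff, so the following is only a minimal sketch of how the updated process_video might be wired to the new gr.Video input inside the same Blocks context. The video_output and detect_button names are assumptions for illustration, not components shown in the commit.

    # Hypothetical wiring (component names are illustrative only).
    # process_video returns the output file path, which gr.Video can display,
    # and its input normalization accepts a path string, a (path, ...) tuple,
    # or a {"name": path} dict, depending on the Gradio version.
    video_output = gr.Video(label="Detection Output", elem_id="video-output")
    detect_button = gr.Button("Run Detection")
    detect_button.click(fn=process_video, inputs=video_input, outputs=video_output)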