practisebook committed
Commit 04a2bd6 · verified · 1 Parent(s): 34fa079

Update app.py

Files changed (1)
  1. app.py +28 -41
app.py CHANGED
@@ -1,50 +1,37 @@
 import gradio as gr
 from ultralytics import YOLO
 import cv2
-from gtts import gTTS
-import numpy as np
-import tempfile
-import os
 
 # Load YOLOv8 model
-model = YOLO("yolov8n.pt")  # Make sure the YOLOv8 model file is in the same directory
 
-# Function to process the video frame and detect objects
-def detect_objects(image):
-    # Perform object detection
-    results = model(image)
-    annotated_frame = results[0].plot()  # Annotate the frame with bounding boxes
-
-    # Extract detected object labels
-    detected_objects = [model.names[int(box.cls)] for box in results[0].boxes]
-    if detected_objects:
-        objects_text = ", ".join(set(detected_objects))
-        # Generate audio alert for detected objects
-        tts = gTTS(f"Detected: {objects_text}", lang="en")
-        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
-        tts.save(temp_file.name)
-        return annotated_frame, temp_file.name
-    return annotated_frame, None
 
-# Gradio Interface
-def process_frame(image):
-    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
-    annotated_frame, audio_file = detect_objects(image)
-    if audio_file:
-        return annotated_frame, audio_file
-    else:
-        return annotated_frame, None
 
-# Gradio interface for real-time webcam feed
-webcam = gr.Interface(
-    fn=process_frame,
-    inputs=gr.Image(source="webcam", tool="editor", type="numpy"),
-    outputs=[
-        gr.Image(label="Detected Objects"),
-        gr.Audio(label="Audio Alert (if any)")
-    ],
-    live=True,  # Enable live streaming from webcam
-)
 
-# Launch Gradio App
-webcam.launch()
+import os
 import gradio as gr
 from ultralytics import YOLO
 import cv2
+from datetime import datetime
 
 # Load YOLOv8 model
+model = YOLO("yolov8n.pt")
+
+def detect_objects(video):
+    cap = cv2.VideoCapture(video)
+    frames = []
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        results = model(frame)
+        annotated_frame = results[0].plot()
+        _, buffer = cv2.imencode('.jpg', annotated_frame)
+        frames.append(buffer.tobytes())
+    cap.release()
+    return frames
 
+# Create Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Real-Time Object Detection for Blind Assistance")
+    gr.Markdown("This app detects objects in real-time using your webcam.")
 
+    # Use gr.Video for webcam input
+    video_input = gr.Video(source="webcam", label="Webcam Stream")
+    output_gallery = gr.Video(label="Detection Output")
 
+    detect_button = gr.Button("Start Detection")
+    detect_button.click(detect_objects, inputs=[video_input], outputs=[output_gallery])
 
+# Launch the app
+demo.launch()