import streamlit as st
import cv2
import numpy as np
import base64
import io
from collections import Counter
from PIL import Image
from ultralytics import YOLO
import os

# Set page config
st.set_page_config(page_title="Object Detection App", layout="wide")

# Model paths
MODELS = {
    'yolov8s.pt': './model/yolov8s.pt',
    'yolov9m.pt': './model/yolov9m.pt'
}
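# The weight files are expected to be present locally under ./model/;
# get_model() below raises a ValueError if a path is missing.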

# Load models on demand
def get_model(model_name):
    """Load the requested model if its weight file is available."""
    if model_name in MODELS and os.path.exists(MODELS[model_name]):
        return YOLO(MODELS[model_name])
    else:
        raise ValueError(f"Model {model_name} not found")
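
# Note: the weights are re-read from disk on every detection request. A cached
# variant (a minimal sketch, assuming Streamlit >= 1.18 where st.cache_resource
# exists) could look like:
#
#     @st.cache_resource
#     def get_model_cached(model_name):
#         return get_model(model_name)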

def decode_base64_image(base64_string):
    """Decode a base64-encoded image string into a NumPy array."""
    # Remove data URL prefix if present
    if ',' in base64_string:
        base64_string = base64_string.split(',')[1]
    image_data = base64.b64decode(base64_string)
    image = Image.open(io.BytesIO(image_data))
    return np.array(image)
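
# Note: decode_base64_image is not called by the UI below; it is kept as a
# helper for base64-encoded input such as data URLs.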

def process_detections(results, model):
    """Process detection results into a standard format."""
    detections = []
    for result in results:
        boxes = result.boxes
        for box in boxes:
            # Bounding box corner coordinates (x1, y1, x2, y2)
            x1, y1, x2, y2 = box.xyxy[0]
            # Confidence and class
            conf = box.conf[0]
            cls = int(box.cls[0])
            class_name = model.names[cls]
            # Build the detection record
            detection = {
                'bbox': [float(x1), float(y1), float(x2 - x1), float(y2 - y1)],
                'class': class_name,
                'confidence': float(conf)
            }
            detections.append(detection)
    return detections
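
# Each detection dict holds 'bbox' as [x, y, width, height] in pixels,
# 'class' as the label name, and 'confidence' as a float.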

# App title
st.title("Object Detection App")

# Sidebar for settings
st.sidebar.title("Settings")

# Available models info
available_models = [
    {'name': 'yolov8s.pt', 'type': 'Object Detection', 'description': 'YOLOv8s (Fastest)'},
    {'name': 'yolov9m.pt', 'type': 'Object Detection', 'description': 'YOLOv9m (Highest Accuracy)'},
]

# Model selection
model_options = {m['name']: f"{m['name']} - {m['description']}" for m in available_models}
model_name = st.sidebar.selectbox(
    "Select Model",
    options=list(model_options.keys()),
    format_func=lambda x: model_options[x]
)

# Confidence threshold
confidence = st.sidebar.slider("Confidence Threshold", min_value=0.1, max_value=1.0, value=0.25, step=0.05)
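
# The slider value is passed to the model call as conf=confidence, so boxes
# scoring below the threshold are not returned.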

# Tab selection
tab1, tab2 = st.tabs(["Single Image", "Multiple Images"])

with tab1:
    st.header("Single Image Detection")

    # Image upload
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Display uploaded image
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # Process button
        if st.button("Detect Objects"):
            try:
                with st.spinner("Detecting objects..."):
                    # Load model
                    model = get_model(model_name)

                    # Convert to numpy array
                    image_np = np.array(image)

                    # Object detection
                    results = model(image_np, conf=confidence)

                    # Process results
                    detections = process_detections(results, model)

                    # Object grouping
                    object_counts = Counter(det['class'] for det in detections)
                    grouped_objects = [
                        {'class': obj, 'count': count}
                        for obj, count in object_counts.items()
                    ]

                    # Display results if any detections found
                    if detections:
                        # Draw bounding boxes on image
                        result_image = image_np.copy()
                        for det in detections:
                            x, y, w, h = [int(val) for val in det['bbox']]
                            cv2.rectangle(result_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                            cv2.putText(result_image, f"{det['class']} {det['confidence']:.2f}",
                                        (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                        # Show image with detections
                        st.image(result_image, caption="Detection Results", use_column_width=True)

                        # Display summary
                        st.subheader("Detection Summary")
                        for obj in grouped_objects:
                            st.write(f"- {obj['class']}: {obj['count']}")

                        # Display detection details
                        st.subheader("Detection Details")
                        for i, det in enumerate(detections, 1):
                            st.write(f"#{i}: {det['class']} (Confidence: {det['confidence']:.2f})")
                    else:
                        st.info("No objects detected in the image.")
            except Exception as e:
                st.error(f"Error processing image: {str(e)}")

with tab2:
    st.header("Multiple Images Detection")

    uploaded_files = st.file_uploader("Upload multiple images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)

    if uploaded_files:
        st.write(f"{len(uploaded_files)} images uploaded")

        # Process button
        if st.button("Detect Objects in All Images"):
            try:
                with st.spinner("Detecting objects in multiple images..."):
                    # Load model
                    model = get_model(model_name)

                    # Process each image
                    all_detections = []
                    for i, file in enumerate(uploaded_files):
                        # Read image
                        image = Image.open(file)
                        image_np = np.array(image)

                        # Object detection
                        results = model(image_np, conf=confidence)

                        # Process results
                        detections = process_detections(results, model)
                        all_detections.append(detections)

                        # Create columns for image display
                        col1, col2 = st.columns(2)
                        with col1:
                            st.write(f"Image {i+1}: {file.name}")
                            st.image(image, caption=f"Original - {file.name}", use_column_width=True)
                        with col2:
                            # Draw bounding boxes
                            result_image = image_np.copy()
                            for det in detections:
                                x, y, w, h = [int(val) for val in det['bbox']]
                                cv2.rectangle(result_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                                cv2.putText(result_image, f"{det['class']} {det['confidence']:.2f}",
                                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                            st.image(result_image, caption=f"Detections - {file.name}", use_column_width=True)

                        # Display detections for this image
                        object_counts = Counter(det['class'] for det in detections)
                        st.write("Detected objects:")
                        for obj, count in object_counts.items():
                            st.write(f"- {obj}: {count}")
                        st.divider()

                    # Overall summary
                    st.subheader("Overall Detection Summary")
                    all_objects = []
                    for detections in all_detections:
                        all_objects.extend([det['class'] for det in detections])
                    total_counts = Counter(all_objects)
                    for obj, count in total_counts.items():
                        st.write(f"- {obj}: {count} (across all images)")
            except Exception as e:
                st.error(f"Error processing images: {str(e)}")

# About section
st.sidebar.markdown("---")
st.sidebar.header("About")
st.sidebar.info("""
This app uses YOLO models for object detection.
- YOLOv8s: Faster detection
- YOLOv9m: Higher accuracy
""")