# AI Sign Language Interpreter — Streamlit app (Hugging Face Spaces)
import streamlit as st
import numpy as np
from PIL import Image
import mediapipe as mp
import cv2

# Initialize MediaPipe Hands once at module load (shared by all requests).
# NOTE(review): inputs here are single still images (camera snapshot / upload);
# MediaPipe recommends static_image_mode=True for unrelated images — confirm.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
# Function to recognize letters based on hand landmarks | |
def get_letter(landmarks):
    """Map hand landmarks to a letter using simple positional heuristics.

    Args:
        landmarks: indexable sequence of landmark objects with a ``.y``
            attribute; index 0 is the wrist, 4 the thumb tip, 8 the index
            fingertip (MediaPipe Hands ordering).

    Returns:
        'A' if the thumb tip is above the index tip, 'B' if the thumb tip
        is below the wrist, otherwise 'Unknown'. (Placeholder rules — the
        recognition logic is meant to be expanded per letter.)
    """
    thumb_tip = landmarks[4]
    index_tip = landmarks[8]
    # y grows downward in image coordinates, so "above" means smaller y.
    if thumb_tip.y < index_tip.y:  # Example condition for 'A'
        return 'A'
    elif thumb_tip.y > landmarks[0].y:  # Example condition for 'B'
        return 'B'
    # Add more conditions for other letters.
    return 'Unknown'
# Process hand gestures | |
def process_hand(image):
    """Detect a hand in *image*, draw its landmarks, and label the letter.

    Args:
        image: (H, W, 3) uint8 numpy array; annotated in place.
            NOTE(review): callers build this from PIL, which yields RGB,
            yet this function converts BGR2RGB as if it were BGR — the
            frame handed to MediaPipe may be channel-swapped; confirm the
            intended color space end to end.

    Returns:
        The same array, with landmarks and the recognized letter drawn on it
        (unchanged when no hand is detected).
    """
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = hands.process(image_rgb)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            # Classify the gesture and stamp the letter in the top-left corner.
            letter = get_letter(hand_landmarks.landmark)
            cv2.putText(image, letter, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return image
# --- Streamlit UI: live camera capture ---
st.title("AI Sign Language Interpreter")
st.write("Make a gesture in front of your camera, or upload an image.")

# Streamlit's built-in camera widget returns a single snapshot (or None).
video_input = st.camera_input("Take a picture with your webcam")
if video_input is not None:
    image = Image.open(video_input)
    image_np = np.array(image)
    # Detect and annotate the hand gesture on the snapshot.
    processed_image = process_hand(image_np)
    st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption="Processed Output", use_column_width=True)
# --- Streamlit UI: image upload ---
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    uploaded_image = Image.open(uploaded_file)
    # Force 3-channel RGB: PNG uploads can be RGBA or grayscale, which
    # would otherwise crash the (H, W, 3) cv2.cvtColor calls downstream.
    uploaded_image_np = np.array(uploaded_image.convert("RGB"))
    # Detect and annotate the hand gesture on the uploaded image.
    processed_uploaded_image = process_hand(uploaded_image_np)
    st.image(cv2.cvtColor(processed_uploaded_image, cv2.COLOR_BGR2RGB), caption="Processed Uploaded Image", use_column_width=True)