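# AI Sign Language Interpreter: a Streamlit app that uses MediaPipe Hands to detect
# a hand in a webcam snapshot or an uploaded image and overlay a guessed letter.
# Assumed dependencies (inferred from the imports below, versions not pinned):
#   streamlit, mediapipe, opencv-python, numpy, Pillow
# Run with: streamlit run <this_file>.py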
import streamlit as st
import numpy as np
from PIL import Image
import mediapipe as mp
import cv2

# Initialize MediaPipe Hands.
# static_image_mode=True because the app works on independent single frames
# (camera snapshots and uploaded images), not a continuous video stream.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=True, max_num_hands=1, min_detection_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils

# Recognize a letter from hand landmarks using simple placeholder rules.
# MediaPipe Hands returns 21 landmarks in normalized image coordinates
# (y increases downward): index 0 is the wrist, 4 the thumb tip, 8 the index fingertip.
def get_letter(landmarks):
    thumb_tip = landmarks[4]
    index_tip = landmarks[8]

    # Example conditions for gesture recognition (you can expand this logic)
    if thumb_tip.y < index_tip.y:  # example condition for 'A': thumb tip above the index fingertip
        return 'A'
    elif thumb_tip.y > landmarks[0].y:  # example condition for 'B': thumb tip below the wrist
        return 'B'
    # Add more conditions for other letters
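    # For instance, a further rule might use landmark 6 (the index-finger PIP joint)
    # to test for an extended index finger. This is only a hypothetical sketch,
    # not part of the original logic:
    # elif index_tip.y < landmarks[6].y and thumb_tip.y > landmarks[6].y:
    #     return 'D'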
    # Return 'Unknown' if no match
    return 'Unknown'

# Detect a hand in an RGB image, draw its landmarks, and overlay the recognized letter
def process_hand(image):
    # Images decoded with PIL are already RGB, which is what MediaPipe expects,
    # so no BGR-to-RGB conversion is needed here.
    results = hands.process(image)

    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            landmarks = hand_landmarks.landmark

            # Get the recognized letter
            letter = get_letter(landmarks)

            # Draw the recognized letter on the image
            cv2.putText(image, letter, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return image

# Streamlit Interface
st.title("AI Sign Language Interpreter")
st.write("Make a gesture in front of your camera, or upload an image.")

# Use Streamlit's built-in camera input (captures a single snapshot, not a video stream)
camera_image = st.camera_input("Take a picture with your webcam")

if camera_image is not None:
    # Read the snapshot and force 3-channel RGB (MediaPipe expects RGB input)
    image = Image.open(camera_image).convert("RGB")
    # Convert the image to a numpy array
    image_np = np.array(image)

    # Process the hand gesture
    processed_image = process_hand(image_np)

    # Display the processed output (already RGB, so no colour conversion is needed)
    st.image(processed_image, caption="Processed Output", use_column_width=True)

# Upload image section
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Force 3-channel RGB (PNG uploads may include an alpha channel)
    uploaded_image = Image.open(uploaded_file).convert("RGB")
    uploaded_image_np = np.array(uploaded_image)

    # Process the hand gesture
    processed_uploaded_image = process_hand(uploaded_image_np)

    # Display the processed output (already RGB, so no colour conversion is needed)
    st.image(processed_uploaded_image, caption="Processed Uploaded Image", use_column_width=True)