import streamlit as st
import numpy as np
from PIL import Image
import mediapipe as mp

# Try importing OpenCV; stop the app with a clear message if it is missing
try:
    import cv2
except ImportError:
    st.error("OpenCV is not installed. Please check the requirements.")
    st.stop()

# Initialize the MediaPipe Hands detector (single hand, video mode) and the drawing helpers
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils

# Recognize a simple gesture from MediaPipe hand landmarks.
# Landmark indices: 0 = wrist, 4 = thumb tip, 8 = index finger tip.
# Note that y grows downward in image coordinates, so a smaller y means "higher up".
def get_gesture(landmarks):
    thumb_tip = landmarks[4]
    index_tip = landmarks[8]

    if thumb_tip.y < index_tip.y:
        # Thumb tip above the index tip -> thumbs up
        return 'Thumbs Up'
    elif thumb_tip.y > landmarks[0].y:
        # Thumb tip below the wrist -> thumbs down
        return 'Thumbs Down'
    else:
        return 'Other Gesture'
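
# Example (illustrative values): with thumb_tip.y = 0.3 and index_tip.y = 0.6 the function
# returns 'Thumbs Up'; with the thumb tip below the wrist (larger y) it returns 'Thumbs Down'.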

# Detect a hand in a BGR image, draw its landmarks, and annotate the recognized gesture
def process_hand(image):
    # MediaPipe expects RGB input, while OpenCV-style images are BGR
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = hands.process(image_rgb)

    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

            landmarks = hand_landmarks.landmark
            gesture = get_gesture(landmarks)

            # Draw the recognized gesture label on the image
            cv2.putText(image, gesture, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return image

# Streamlit Interface
st.title("AI Sign Language Interpreter")
st.write("Make a thumbs up or thumbs down gesture in front of your camera.")

# Upload an image for one-off processing
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    # PIL loads images as RGB; convert to BGR so process_hand can treat it like an OpenCV frame
    image = cv2.cvtColor(np.array(Image.open(uploaded_file).convert("RGB")), cv2.COLOR_RGB2BGR)
    processed_image = process_hand(image)

    # Convert back to RGB for display in Streamlit
    st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption="Processed Output", use_column_width=True)
else:
    st.write("Please upload an image.")

# Activate the webcam feed for live processing.
# A checkbox (rather than a button) is used so the capture loop survives Streamlit reruns;
# unchecking it triggers a rerun that stops the loop.
run = st.checkbox("Start Webcam")
if run:
    cam = cv2.VideoCapture(0)
    stframe = st.empty()

    while run:
        ret, frame = cam.read()
        if not ret:
            st.write("Failed to capture image")
            break

        processed_frame = process_hand(frame)
        # Webcam frames are BGR; convert to RGB before handing them to Streamlit
        stframe.image(cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB), channels="RGB", use_column_width=True)

    cam.release()
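
# To run this app locally (assuming the file is saved as app.py and the dependencies
# listed above are installed):
#   streamlit run app.py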