Create app.py
app.py
ADDED
@@ -0,0 +1,71 @@
import streamlit as st
import cv2
import mediapipe as mp
import numpy as np
from PIL import Image

# Initialize MediaPipe Hands and its drawing utilities
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils

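# Note: mp.solutions is MediaPipe's legacy Python API (newer releases point
# to MediaPipe Tasks), but it matches the calls used throughout this file.
# static_image_mode=False enables cross-frame tracking, which suits the
# webcam loop below; for independent uploaded photos, True may detect hands
# more reliably.
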
# Function to recognize gestures
def get_gesture(landmarks):
    thumb_tip = landmarks[4]   # THUMB_TIP
    index_tip = landmarks[8]   # INDEX_FINGER_TIP

    if thumb_tip.y < index_tip.y:
        return 'Thumbs Up'
    elif thumb_tip.y > landmarks[0].y:   # landmark 0 is the wrist
        return 'Thumbs Down'
    else:
        return 'Other Gesture'

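# Coordinate note: MediaPipe hand landmarks are normalized to [0, 1] with y
# increasing downward, so thumb_tip.y < index_tip.y means the thumb tip sits
# above the index tip in the frame, and a thumb tip below the wrist reads as
# thumbs down. This is a two-point heuristic rather than a trained
# sign-language classifier, so most other poses fall through to 'Other Gesture'.
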
# Detect a hand, draw its landmarks, and label the recognized gesture.
# Expects a BGR image (OpenCV convention) and draws on it in place.
def process_hand(image):
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    results = hands.process(image_rgb)

    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

            landmarks = hand_landmarks.landmark
            gesture = get_gesture(landmarks)

            # Draw the recognized gesture on the image
            cv2.putText(image, gesture, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return image

# Streamlit interface
st.title("AI Sign Language Interpreter")
st.write("Make a thumbs up or thumbs down gesture in front of your camera.")

# Upload an image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    # PIL decodes to RGB (or grayscale/RGBA); force RGB, then convert to the
    # BGR layout that process_hand expects.
    image = cv2.cvtColor(np.array(Image.open(uploaded_file).convert("RGB")), cv2.COLOR_RGB2BGR)
    processed_image = process_hand(image)

    # Convert the image back to RGB for display
    st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption="Processed Output", use_column_width=True)
else:
    st.write("Please upload an image.")

# Webcam feed for live processing. A checkbox keeps its state across
# Streamlit reruns: unticking it triggers a rerun that skips the loop.
run = st.checkbox("Start Webcam")
if run:
    cam = cv2.VideoCapture(0)
    stframe = st.empty()

    while run:
        ret, frame = cam.read()
        if not ret:
            st.write("Failed to capture image")
            break

        processed_frame = process_hand(frame)
        stframe.image(cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB), channels="RGB", use_column_width=True)

    cam.release()
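
To run the app locally, a minimal setup sketch (the package list is inferred from the imports above and is an assumption, not part of this commit):

pip install streamlit opencv-python-headless mediapipe numpy Pillow
streamlit run app.py

A requirements.txt for the Space could list the same five packages, one per line. One caveat: cv2.VideoCapture(0) opens a camera on the machine running the script, so the webcam path works locally but not on hosted Spaces hardware; Streamlit's st.camera_input widget, which captures a frame in the visitor's browser, is the usual substitute there.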