notrey committed
Commit 74315dc · 1 Parent(s): d42bc4a

updating prj

Files changed (2):
  1. app.py +62 -47
  2. requirements.txt +3 -4
app.py CHANGED
@@ -1,49 +1,64 @@
  import cv2
- import gradio as gr
  import numpy as np
- from fer import FER
- from moviepy.editor import VideoFileClip
-
-
- # Initialize the pre-trained detector once so you don't reinitialize on every function call.
- detector = FER(mtcnn=True)  # Optionally, you can set mtcnn to False to use a faster (but less accurate) cascade.
-
- def emotion_recognition(image):
-     """
-     Process the input image, detect emotions on faces,
-     and annotate the image with bounding boxes and emotion labels.
-
-     Parameters:
-         image (numpy.ndarray): Input image (RGB).
-
-     Returns:
-         numpy.ndarray: Annotated image with emotion labels.
-     """
-     # fer works with RGB images, which is what Gradio provides by default.
-     results = detector.detect_emotions(image)
-     annotated_image = image.copy()
-
-     # Loop through each detected face
-     for face in results:
-         (x, y, w, h) = face["box"]
-         # Get the dominant emotion for the detected face
-         dominant_emotion = max(face["emotions"].items(), key=lambda item: item[1])[0]
-         # Draw bounding box around face
-         cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
-         # Put the emotion label above the bounding box
-         cv2.putText(annotated_image, dominant_emotion, (x, y - 10),
-                     cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
-     return annotated_image
-
- # Create a Gradio Interface
- interface = gr.Interface(
-     fn=emotion_recognition,
-     inputs=gr.Image(type="numpy", label="Input Image"),
-     outputs=gr.Image(type="numpy", label="Annotated Image"),
-     title="Facial Emotion Recognition",
-     description="Upload an image and let the app detect and annotate facial emotions."
- )
-
- # Run the app locally
- if __name__ == "__main__":
-     interface.launch()

+ import streamlit as st
+ from transformers import pipeline
+ from PIL import Image
  import cv2
  import numpy as np
+
+ # --- App Title and Description ---
+ st.title("Real-Time Emotion Detection App")
+ st.write("""
+ This app uses a lightweight, pre-trained emotion detection model from Hugging Face to predict emotions
+ from faces in an image. You can either upload an image or use your webcam to capture an image.
+ """)
+
+ # --- Load the Emotion Detection Model ---
+ # Cache the model loading so it isn't reloaded on every app interaction.
+ @st.cache_resource(show_spinner=False)
+ def load_emotion_detector():
+     # Loads the Hugging Face image-classification pipeline with the specified model.
+     classifier = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
+     return classifier
+
+ classifier = load_emotion_detector()
+
+ # --- Sidebar: Select Input Method ---
+ st.sidebar.header("Select Input Method")
+ input_method = st.sidebar.radio("Choose one:", ["Upload an Image", "Capture with Webcam"])
+
+ # --- Process Image and Perform Inference ---
+ def predict_emotion(image: Image.Image):
+     # Optionally, you can perform additional preprocessing (e.g., face detection or cropping) here.
+     results = classifier(image)
+     # The pipeline returns a list of dictionaries sorted by score.
+     top_prediction = results[0]
+     return top_prediction
+
+ # --- Main Section: Handling Input Methods ---
+
+ if input_method == "Upload an Image":
+     uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
+     if uploaded_file is not None:
+         # Open the image file with PIL.
+         image = Image.open(uploaded_file).convert("RGB")
+         st.image(image, caption="Uploaded Image", use_column_width=True)
+         prediction = predict_emotion(image)
+         st.subheader("Prediction:")
+         st.write(f"**Emotion:** {prediction['label']}")
+         st.write(f"**Confidence:** {prediction['score']:.2f}")
+
+ elif input_method == "Capture with Webcam":
+     # st.camera_input returns an image file-like object when a picture is taken.
+     picture = st.camera_input("Capture an Image")
+     if picture is not None:
+         # Load image from the captured file.
+         image = Image.open(picture).convert("RGB")
+         st.image(image, caption="Captured Image", use_column_width=True)
+         prediction = predict_emotion(image)
+         st.subheader("Prediction:")
+         st.write(f"**Emotion:** {prediction['label']}")
+         st.write(f"**Confidence:** {prediction['score']:.2f}")
+
+ # --- Optional: Additional Instructions ---
+ st.write("""
+ *Note: For best results in real-time detection, consider focusing the camera on your face or uploading a clear face image.*
+ """)
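
For context on the new inference path: the transformers image-classification pipeline returns a list of {"label", "score"} dicts sorted by descending score, which is why predict_emotion can simply take results[0]. A minimal standalone sketch of that path outside Streamlit ("face.jpg" is a hypothetical input file, and the exact label set depends on the model checkpoint):

    # Minimal sketch of the inference path used in app.py above.
    # "face.jpg" is a hypothetical input; labels depend on the model checkpoint.
    from PIL import Image
    from transformers import pipeline

    classifier = pipeline(
        "image-classification",
        model="dima806/facial_emotions_image_detection",
    )

    image = Image.open("face.jpg").convert("RGB")
    results = classifier(image)  # e.g. [{"label": "happy", "score": 0.93}, ...]
    print(results[0]["label"], f"{results[0]['score']:.2f}")
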
requirements.txt CHANGED
@@ -1,5 +1,4 @@
- gradio
+ streamlit
+ transformers
+ pillow
  opencv-python
- fer
- numpy
- moviepy
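
A note on the new pins: app.py still imports cv2 and numpy (they survive as context lines in the diff above), and although numpy is no longer listed, it still gets installed as a dependency of opencv-python. With the packages installed, the app starts with `streamlit run app.py`. A quick import sanity check for the new environment (a minimal sketch; PIL is the import name for pillow, cv2 for opencv-python):

    # Verify that every package app.py relies on still resolves
    # after the requirements change.
    import importlib

    for module in ("streamlit", "transformers", "PIL", "cv2", "numpy"):
        importlib.import_module(module)
        print(f"{module}: OK")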