# CompVisProj / app.py
import streamlit as st
from transformers import pipeline
from PIL import Image
import cv2
import numpy as np
# --- App Title and Description ---
st.title("Real-Time Emotion Detection App")
st.write("""
This app uses a lightweight, pre-trained emotion detection model from Hugging Face to predict the emotion
shown on a face. You can either upload an image or capture one with your webcam.
""")
# --- Load the Emotion Detection Model ---
# Cache the model loading so it isn’t reloaded on every app interaction.
@st.cache_resource(show_spinner=False)
def load_emotion_detector():
    # Loads the Hugging Face image-classification pipeline with the specified model.
    classifier = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
    return classifier

classifier = load_emotion_detector()
# --- Sidebar: Select Input Method ---
st.sidebar.header("Select Input Method")
input_method = st.sidebar.radio("Choose one:", ["Upload an Image", "Capture with Webcam"])
# --- Process Image and Perform Inference ---
def predict_emotion(image: Image.Image):
    # Optionally, you can perform additional preprocessing (e.g., face detection or cropping) here.
    results = classifier(image)
    # The pipeline returns a list of dictionaries sorted by score.
    top_prediction = results[0]
    return top_prediction
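
# A minimal, optional sketch of the face-detection/crop preprocessing mentioned
# in predict_emotion above. It assumes OpenCV's bundled Haar cascade data is
# available; the helper name and detection parameters are illustrative and not
# part of the original app.
def crop_largest_face(image: Image.Image) -> Image.Image:
    # Detect faces on a grayscale copy of the image.
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        # No face found: fall back to classifying the full image.
        return image
    # Keep the largest detected face and crop the original (color) image to it.
    x, y, w, h = max(faces, key=lambda box: box[2] * box[3])
    return image.crop((x, y, x + w, y + h))
# Example usage (hypothetical): prediction = predict_emotion(crop_largest_face(image))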
# --- Main Section: Handling Input Methods ---
if input_method == "Upload an Image":
    uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Open the image file with PIL.
        image = Image.open(uploaded_file).convert("RGB")
        st.image(image, caption="Uploaded Image", use_column_width=True)
        prediction = predict_emotion(image)
        st.subheader("Prediction:")
        st.write(f"**Emotion:** {prediction['label']}")
        st.write(f"**Confidence:** {prediction['score']:.2f}")
elif input_method == "Capture with Webcam":
    # st.camera_input returns an image file-like object when a picture is taken.
    picture = st.camera_input("Capture an Image")
    if picture is not None:
        # Load image from the captured file.
        image = Image.open(picture).convert("RGB")
        st.image(image, caption="Captured Image", use_column_width=True)
        prediction = predict_emotion(image)
        st.subheader("Prediction:")
        st.write(f"**Emotion:** {prediction['label']}")
        st.write(f"**Confidence:** {prediction['score']:.2f}")
# --- Optional: Additional Instructions ---
st.write("""
*Note: For best results in real-time detection, consider focusing the camera on your face or uploading a clear face image.*
""")