mohitrajdeo committed
Commit · 1081520
1 Parent(s): fe3e720

refactor(app.py): improve token handling and error management for Hugging Face integration

Enhance the robustness of the Hugging Face token handling by introducing a fallback mechanism using Streamlit secrets or environment variables. Additionally, improve error management during model loading to provide better user feedback in case of failures.
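
For reference, the token fallback introduced by this commit boils down to roughly the sketch below. This is a minimal illustration, not code from the commit: the resolve_hf_token helper name is hypothetical, and it assumes Streamlit's st.secrets mapping and huggingface_hub.login as used in the diff.

import os

import streamlit as st
from huggingface_hub import login


def resolve_hf_token():
    # Hypothetical helper mirroring the commit's fallback logic.
    try:
        # Deployed case (Streamlit Cloud or Spaces): token is provided via st.secrets.
        return st.secrets["HF_TOKEN"]
    except Exception:
        # Local development: fall back to an environment variable.
        return os.environ.get("HF_TOKEN")


hf_token = resolve_hf_token()
if hf_token:
    login(token=hf_token)
else:
    st.warning("Hugging Face token not found. Some features may not work correctly.")

Locally, exporting HF_TOKEN in the shell is enough; on Streamlit Cloud or Spaces the same key is set in the app's secrets, which the st.secrets lookup covers.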
app.py CHANGED
@@ -676,14 +676,37 @@ from transformers import pipeline, AutoModelForSequenceClassification, AutoToken
 import os
 from huggingface_hub import login
 
-login(token=os.environ.get("HF_TOKEN"))
+# login(token=os.environ.get("HF_TOKEN"))
+
+try:
+    # For Streamlit Cloud or Spaces deployment
+    hf_token = st.secrets["HF_TOKEN"]
+except:
+    # Fallback to environment variables for local development
+    hf_token = os.environ.get("HF_TOKEN")
+
+if hf_token:
+    login(token=hf_token)
+else:
+    st.warning("Hugging Face token not found. Some features may not work correctly.")
+
+
+# if selected == 'Mental-Analysis':
+# # Load the Hugging Face model
+# model_name = "mental/mental-roberta-base"
+# tokenizer = AutoTokenizer.from_pretrained(model_name)
+# model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
 
 if selected == 'Mental-Analysis':
     # Load the Hugging Face model
-    model_name = "mental/mental-roberta-base"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
+    try:
+        model_name = "mental/mental-roberta-base"
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForSequenceClassification.from_pretrained(model_name)
+    except Exception as e:
+        st.error(f"Error loading mental health model: {e}")
+        st.info("Please check your Hugging Face token configuration.")
     # Sidebar with title and markdown
     st.sidebar.title("🧠 Mental Health Analysis")
     st.sidebar.markdown("""