Update app.py
app.py (CHANGED)
@@ -2,15 +2,16 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import os

+# Retrieve the Hugging Face token from environment variables
 HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

 if HF_TOKEN is None:
     raise ValueError("HUGGINGFACE_TOKEN is not set. Add it in your Space's secrets.")

-# Load model and tokenizer
+# Load model and tokenizer with authentication
 MODEL_NAME = "meta-llama/Llama-Guard-3-1B"
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=HF_TOKEN)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, use_auth_token=HF_TOKEN)

 # Streamlit UI
 st.title("AI Safe Content Checker Tool")
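Note that passing the token this way still works, but newer transformers releases deprecate the use_auth_token argument in favor of token. A minimal sketch of the equivalent loading code, assuming a recent transformers version and the same HUGGINGFACE_TOKEN secret configured in the Space:

# Minimal sketch, assuming a transformers release where `token` replaces the
# deprecated `use_auth_token` argument; behavior is otherwise the same.
import os

from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "meta-llama/Llama-Guard-3-1B"
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")  # set via the Space's secrets

# Pass the token so the gated meta-llama repository can be downloaded
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN)

Either form authenticates the download; the newer keyword simply avoids the deprecation warning on current library versions.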