# text_moderator / app.py
import os

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Retrieve the Hugging Face token from environment variables (set as a secret in the Space)
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
if HF_TOKEN is None:
    raise ValueError("HUGGINGFACE_TOKEN is not set. Add it in your Space's secrets.")

# Load model and tokenizer with authentication (`token` replaces the deprecated `use_auth_token`)
MODEL_NAME = "meta-llama/Llama-Guard-3-1B"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN)
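# Note: Streamlit reruns this script on every interaction, so the model above is
# reloaded each time. A minimal caching sketch (assumes a Streamlit version that
# provides st.cache_resource; load_model is a hypothetical helper, not original code):
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
#       mdl = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN)
#       return tok, mdl
#
#   tokenizer, model = load_model()
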
# Streamlit UI
st.title("AI Safe Content Checker Tool")
st.write("Enter text below, and the model will check if it's safe.")
# User input
user_input = st.text_area("Enter your text here:")

def check_content(text):
    # Format the input with the model's chat template; the original "<|user|>" /
    # "<|assistant|>" markers are not the prompt format Llama Guard 3 expects
    conversation = [{"role": "user", "content": [{"type": "text", "text": text}]}]
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    prompt_len = input_ids.shape[1]
    outputs = model.generate(input_ids, max_new_tokens=50)
    # Decode only the newly generated tokens, i.e. the moderation verdict
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()
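
# Llama Guard typically answers with "safe", or with "unsafe" followed by the violated
# category code (e.g. "S1") on the next line. A minimal parsing sketch under that
# assumption; parse_guard_output is a hypothetical helper, not part of the original app.
def parse_guard_output(response):
    lines = [line.strip() for line in response.splitlines() if line.strip()]
    if not lines:
        return "unknown", None
    verdict = lines[0].lower()
    category = lines[1] if verdict == "unsafe" and len(lines) > 1 else None
    return verdict, category
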
if st.button("Check Content"):
    if user_input:
        result = check_content(user_input)
        st.subheader("Moderation Result:")
        st.write(result)  # Show the raw result first to analyze its format
    else:
        st.warning("Please enter some text.")
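
# To run locally (a sketch; assumes you export the token in your shell first):
#   export HUGGINGFACE_TOKEN=hf_xxx
#   streamlit run app.py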