# Hugging Face Space — Streamlit app that queries the HF Inference API.
import os

import requests
import streamlit as st

# Load the Hugging Face API token from the environment (Spaces secrets).
# "KEY2" is the secret's name configured for this Space; token is None if unset.
token = os.environ.get("KEY2")  # Replace "KEY2" with your secret key name
# Function to query the Hugging Face API
def query_huggingface_api(prompt, max_new_tokens=50, temperature=0.7, top_k=50):
    """Send *prompt* to the Hugging Face Inference API and return generated text.

    Args:
        prompt: Text prompt forwarded as the model's input.
        max_new_tokens: Maximum number of tokens the model may generate.
        temperature: Sampling temperature; higher values are more random.
        top_k: Restrict sampling to the k most likely next tokens.

    Returns:
        The generated text (str) on success, or None after reporting the
        HTTP error through ``st.error``.
    """
    model_name = "HuggingFaceH4/zephyr-7b-alpha"  # Replace with your preferred model
    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
    # `token` is the module-level secret loaded at import time.
    headers = {"Authorization": f"Bearer {token}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_new_tokens,
            "temperature": temperature,
            "top_k": top_k,
        },
    }
    # A timeout keeps the Streamlit app from hanging forever when the
    # Inference API is slow or unreachable.
    response = requests.post(api_url, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        # Successful responses are a list of {"generated_text": ...} dicts.
        return response.json()[0]["generated_text"]
    st.error(f"Error: {response.status_code} - {response.text}")
    return None
# Streamlit App
def main():
    """Render the UI: collect a prompt and display the model's response."""
    st.title("Hugging Face API Test")
    st.write("Enter a prompt and get a response from the model.")
    # Input prompt; falsy (empty) input skips the API call entirely.
    prompt = st.text_input("Enter your prompt:")
    if prompt:
        st.write("**Prompt:**", prompt)
        # Query the Hugging Face API
        response = query_huggingface_api(prompt)
        if response:
            st.write("**Response:**", response)
# Standard script entry guard: run the app only when executed directly.
if __name__ == "__main__":
    main()