# Jaleah-ai / app.py
# Author: teckmill — "Create app.py"
# Commit: e8fe080 (verified) — raw / history / blame — 1.08 kB
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# --- Model loading -----------------------------------------------------------
# Hugging Face Hub repo to load; swap this to point at another causal-LM
# checkpoint if needed.
model_name = "teckmill/Jaleah-ai"


@st.cache_resource
def _load_model_assets():
    """Download the tokenizer and model once and build a generation pipeline.

    Wrapped in ``st.cache_resource`` so Streamlit reuses the loaded objects
    across script reruns instead of re-instantiating (and potentially
    re-downloading) the model on every widget interaction.

    Returns:
        tuple: ``(tokenizer, model, generator)`` where ``generator`` is a
        transformers ``text-generation`` pipeline.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return tokenizer, model, generator


# Keep the original module-level names so any external users still work.
tokenizer, model, generator = _load_model_assets()
def generate_text(prompt, max_length=50):
    """Generate a continuation of *prompt* with the loaded model.

    Args:
        prompt (str): Seed text for generation.
        max_length (int, optional): Total token budget for the output
            (prompt + continuation). Defaults to 50, matching the
            previously hard-coded limit.

    Returns:
        str: The generated text as returned by the pipeline (the pipeline's
        output typically includes the prompt itself).
    """
    # The pipeline returns a list of dicts, one per generated sequence;
    # we request the default single sequence and unwrap it.
    generated = generator(prompt, max_length=max_length)
    return generated[0]['generated_text']
# --- Streamlit UI ------------------------------------------------------------
st.title("Jaleah AI - Text Generation")
st.write("Enter a prompt to generate text:")

# Free-form prompt box, pre-filled with a sample seed phrase.
prompt = st.text_area("Prompt", "Once upon a time...")

# Run inference only when the user explicitly clicks the button; an empty
# prompt gets a warning instead of a model call.
if st.button("Generate Text"):
    if not prompt:
        st.warning("Please enter a prompt to generate text.")
    else:
        generated_text = generate_text(prompt)
        st.subheader("Generated Text")
        st.write(generated_text)