import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the tokenizer and model weights for the Jaleah AI checkpoint from the
# Hugging Face Hub.
model_name = "teckmill/Jaleah-ai"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Wrap the model and tokenizer in a ready-to-use text-generation pipeline.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
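# Note: as written above, the model is reloaded on every Streamlit rerun.
# A minimal sketch of an alternative, assuming Streamlit >= 1.18 (the
# load_generator name is introduced here for illustration and is not part of
# the original script); uncomment and use it in place of the loading code above:
#
# @st.cache_resource
# def load_generator():
#     tok = AutoTokenizer.from_pretrained(model_name)
#     mdl = AutoModelForCausalLM.from_pretrained(model_name)
#     return pipeline("text-generation", model=mdl, tokenizer=tok)
#
# generator = load_generator()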

def generate_text(prompt):
    # Run the pipeline on the prompt (up to 50 tokens, prompt included) and
    # return the text of the first generated sequence.
    generated = generator(prompt, max_length=50)
    return generated[0]['generated_text']

# Streamlit UI: title, prompt input, and a button that triggers generation.
st.title("Jaleah AI - Text Generation")
st.write("Enter a prompt to generate text:")

prompt = st.text_area("Prompt", "Once upon a time...")

if st.button("Generate Text"):
    if prompt:
        generated_text = generate_text(prompt)
        st.subheader("Generated Text")
        st.write(generated_text)
    else:
        st.warning("Please enter a prompt to generate text.")
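
# To run the app locally (assuming this script is saved as app.py):
#   streamlit run app.py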