# Hugging Face Space: Zephyr 1.3B text-generation demo (CPU-compatible).
# Third-party imports (grouped per PEP 8).
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Model checkpoint to load from the Hugging Face Hub.
model_id = "NousResearch/zephyr-1.3b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32,  # full precision so the model runs on CPU
)

# Shared text-generation pipeline used by the Gradio handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def generate_text(prompt: str) -> str:
    """Generate a text continuation for *prompt* using the module-level pipeline.

    Args:
        prompt: User-supplied input text.

    Returns:
        The generated text (which includes the original prompt, as returned
        by the transformers text-generation pipeline).
    """
    # Sample up to 200 new tokens; temperature 0.7 trades determinism for variety.
    output = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]
# Gradio UI: a single textbox in, generated text out.
demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=4, placeholder="Escribe algo..."),
    outputs="text",
    title="Generador de texto - Zephyr 1.3B (CPU compatible)",
)

# Guard the launch so importing this module does not start the server.
if __name__ == "__main__":
    demo.launch()