# Hugging Face Space: CPU-friendly text-generation demo (page-scrape metadata removed).
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import gradio as gr
# Hub repo id of the causal-LM checkpoint to load.
# NOTE(review): verify "NousResearch/zephyr-1.3b" exists on the Hub —
# from_pretrained() will raise at startup if the id is wrong.
model_id = "NousResearch/zephyr-1.3b"

# Tokenizer and model are downloaded/cached on first run (network side effect).
tokenizer = AutoTokenizer.from_pretrained(model_id)
# float32 keeps inference CPU-compatible (no half-precision kernels required).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32
)

# Shared text-generation pipeline used by generate_text() below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def generate_text(prompt):
    """Generate a text continuation for *prompt* via the shared pipeline.

    Samples up to 200 new tokens with temperature 0.7 and returns the
    pipeline's generated text for the first (only) candidate.
    """
    candidates = pipe(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
    )
    first_candidate = candidates[0]
    return first_candidate["generated_text"]
# Minimal Gradio UI: one multiline textbox in, generated text out.
# Fix: removed the stray trailing "|" after demo.launch(), which was a
# copy/scrape artifact and made the file a SyntaxError.
demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=4, placeholder="Escribe algo..."),
    outputs="text",
    title="Generador de texto - Zephyr 1.3B (CPU compatible)",
)

demo.launch()