import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Model checkpoint on the Hugging Face Hub and target device for inference
checkpoint = "WillHeld/soft-raccoon"
device = "cuda"

# Load the tokenizer and model, moving the model weights onto the GPU
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

@spaces.GPU(duration=120)
def predict(message, history):
    # Append the new user turn to the running conversation history
    history.append({"role": "user", "content": message})

    # Render the conversation with the model's chat template, then tokenize it
    input_text = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)

    # Sample a completion from the model
    outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
    decoded = tokenizer.decode(outputs[0])

    # Keep only the text after the final assistant header in the decoded sequence
    response = decoded.split("<|start_header_id|>assistant<|end_header_id|>\n\n")[-1]
    return response

# Gradio chat UI backed by predict; type="messages" passes history as role/content dicts
demo = gr.ChatInterface(predict, type="messages")
demo.launch()