Commit ba9ca24 · verified · 1 Parent(s): f3a7933
WillHeld committed

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -10,11 +10,11 @@ model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
 @spaces.GPU(duration=120)
 def predict(message, history):
     history.append({"role": "user", "content": message})
-    input_text = tokenizer.apply_chat_template(history, tokenize=False)
+    input_text = tokenizer.apply_chat_template(history, tokenize=False) + "<|assistant|>"
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
     outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
     decoded = tokenizer.decode(outputs[0])
-    response = decoded.split("<|start_header_id|>assistant<|end_header_id|>\n\n")[-1]
+    response = decoded.split("<|assistant|>")[-1]
     return response
 
 demo = gr.ChatInterface(predict, type="messages")
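Note on the change: the old code split the decoded output on Llama-3-style role headers, while the new code appends a "<|assistant|>" marker to the prompt by hand and splits the decoded string on it. Below is a minimal, template-agnostic sketch of the same flow, not the committed code; the checkpoint name is a placeholder assumption, not necessarily the Space's model. It relies on two standard transformers features: apply_chat_template(..., add_generation_prompt=True) appends whatever assistant-turn marker the tokenizer's chat template defines, and decoding only the newly generated token IDs removes the need to split on any marker string at all.

# Hedged sketch, not the committed app.py. Checkpoint is a placeholder assumption.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # assumption: any chat-tuned model
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

def predict(message, history):
    history.append({"role": "user", "content": message})
    # add_generation_prompt=True makes the chat template append its own
    # assistant-turn marker, so no model-specific string is concatenated by hand.
    inputs = tokenizer.apply_chat_template(
        history, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    outputs = model.generate(
        inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True
    )
    # Decode only the tokens generated after the prompt, instead of splitting
    # the full decoded string on a marker like "<|assistant|>".
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

This keeps the Gradio wiring unchanged: gr.ChatInterface(predict, type="messages") still passes history as a list of role/content dicts and expects a plain string back.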