WillHeld committed
Commit 26579f6 · verified · 1 parent: c96be82

Update app.py

Files changed (1)
  app.py (+2, -3)
app.py CHANGED
@@ -10,12 +10,11 @@ model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
 @spaces.GPU(duration=120)
 def predict(message, history):
     history.append({"role": "user", "content": message})
-    input_text = tokenizer.apply_chat_template(history, tokenize=False)
-    print(input_text)
+    input_text = tokenizer.apply_chat_template(history, tokenize=False) + "<|start_header_id|>assistant<|end_header_id|>\n\n"
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
     outputs = model.generate(inputs, max_new_tokens=100, temperature=0.7, top_p=0.9, do_sample=True)
     decoded = tokenizer.decode(outputs[0])
-    response = decoded
+    response = decoded.split("<|start_header_id|>assistant<|end_header_id|>\n\n")[-1]
     return response
 
 demo = gr.ChatInterface(predict, type="messages")
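
For reference, the change makes the prompt end at the start of a fresh assistant turn (by appending the Llama-3 header tokens) and then keeps only the text after the last such header, so the user sees just the new reply rather than the whole decoded transcript. Below is a minimal sketch of the resulting predict(); it is an illustration, not the exact file: the checkpoint name and setup lines are assumptions, and it swaps the manual string concatenation for the tokenizer's built-in add_generation_prompt=True, which appends the same assistant header for Llama-3-style chat templates.

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: any Llama-3-family chat checkpoint; the real app.py defines its own.
checkpoint = "meta-llama/Meta-Llama-3-8B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

@spaces.GPU(duration=120)
def predict(message, history):
    history.append({"role": "user", "content": message})
    # add_generation_prompt=True appends the assistant header, equivalent to the
    # manual "<|start_header_id|>assistant<|end_header_id|>\n\n" concat in this commit.
    input_text = tokenizer.apply_chat_template(
        history, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(
        inputs, max_new_tokens=100, temperature=0.7, top_p=0.9, do_sample=True
    )
    # Decode only the newly generated tokens; slicing by input length avoids
    # string-splitting the full transcript on the assistant header.
    response = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )
    return response

demo = gr.ChatInterface(predict, type="messages")

Decoding with skip_special_tokens=True also drops the trailing <|eot_id|> marker that the commit's string split would otherwise leave in the response, since the commit decodes the full sequence with special tokens intact.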