WillHeld committed
Commit 888f559 · verified · 1 Parent(s): 03cb4ae

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -12,7 +12,7 @@ def predict(message, history):
     history.append({"role": "user", "content": message})
     input_text = tokenizer.apply_chat_template(history, tokenize=False) + "<|start_header_id|>assistant<|end_header_id|>\n\n"
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-    outputs = model.generate(inputs, max_new_tokens=100, temperature=0.7, top_p=0.9, do_sample=True)
+    outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
     decoded = tokenizer.decode(outputs[0])
     response = decoded.split("<|start_header_id|>assistant<|end_header_id|>\n\n")[-1]
     return response
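
For context, a minimal sketch of the kind of app this diff modifies, assuming a standard transformers + Gradio ChatInterface setup. The checkpoint name, device handling, and interface wiring are illustrative assumptions and may differ from the actual Space; only the body of predict() mirrors the updated file. The sole behavioral change in this commit is raising max_new_tokens from 100 to 1024 so replies are not cut off after roughly 100 tokens.

# Sketch only: model_id, dtype, and the ChatInterface wiring are assumptions,
# not taken from the Space's source.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).to(device)


def predict(message, history):
    # History arrives as a list of {"role": ..., "content": ...} dicts
    # (Gradio "messages" format), so the new user turn is appended directly.
    history.append({"role": "user", "content": message})
    input_text = tokenizer.apply_chat_template(history, tokenize=False) + "<|start_header_id|>assistant<|end_header_id|>\n\n"
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    # max_new_tokens raised from 100 to 1024 so longer answers are not truncated.
    outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
    decoded = tokenizer.decode(outputs[0])
    # Keep only the text after the final assistant header.
    response = decoded.split("<|start_header_id|>assistant<|end_header_id|>\n\n")[-1]
    return response


demo = gr.ChatInterface(predict, type="messages")

if __name__ == "__main__":
    demo.launch()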