sumanthd committed
Commit 075cff2 · 1 Parent(s): 70de837
Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -61,7 +61,7 @@ def generate_for_examples(
     conversation = []
     conversation.append({"role": "user", "content": f"Translate the following text to {tgt_lang}: {message}"})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
     input_ids = input_ids.to(model.device)
@@ -94,7 +94,7 @@ def generate(
     conversation = []
     conversation.append({"role": "user", "content": f"Translate the following text to {tgt_lang}: {message}"})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
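
The commit adds add_generation_prompt=True to both apply_chat_template calls, so the rendered prompt ends with the assistant-turn header and decoding starts with the model's reply (the translation) instead of a continuation of the user message. A minimal sketch of what the flag changes, using a public checkpoint and a hypothetical target language chosen only for illustration (the Space's own model, tokenizer, and language list are defined elsewhere in app.py):

from transformers import AutoTokenizer

# Illustrative checkpoint only; the Space loads its own model/tokenizer in app.py.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

# Same prompt shape as in the diff, with a hypothetical target language and message.
conversation = [
    {"role": "user", "content": "Translate the following text to Hindi: How are you?"}
]

# Without the flag, the rendered prompt stops at the end of the user turn.
plain = tokenizer.apply_chat_template(conversation, tokenize=False)

# With add_generation_prompt=True, the chat template appends the assistant-turn
# header, cueing the model to begin its answer.
primed = tokenizer.apply_chat_template(
    conversation, tokenize=False, add_generation_prompt=True
)

print(plain)
print(primed)

The same keyword works together with return_tensors="pt", which is how the commit applies it before the MAX_INPUT_TOKEN_LENGTH truncation check.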