Spaces: Running on Zero
fix bug: pass add_generation_prompt=True when applying the chat template
app.py (CHANGED)
@@ -61,7 +61,7 @@ def generate_for_examples(
     conversation = []
     conversation.append({"role": "user", "content": f"Translate the following text to {tgt_lang}: {message}"})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
     input_ids = input_ids.to(model.device)
@@ -94,7 +94,7 @@ def generate(
     conversation = []
     conversation.append({"role": "user", "content": f"Translate the following text to {tgt_lang}: {message}"})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
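Why the flag matters: without add_generation_prompt=True, most chat templates render the prompt ending at the user turn, so the model is effectively asked to continue the user's message instead of starting an assistant reply. The sketch below (not part of the commit) makes the difference visible by rendering the template as a string; the zephyr checkpoint is an assumed stand-in for the Space's actual model, and any chat-tuned tokenizer with a chat template behaves the same way.

# Minimal sketch, assuming a chat-tuned checkpoint; the model name is a
# placeholder, not the one this Space uses.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

conversation = [
    {"role": "user", "content": "Translate the following text to French: Hello"}
]

# tokenize=False returns the rendered prompt string so the difference is visible.
# Without the flag, the rendered prompt stops after the user turn.
without_prompt = tokenizer.apply_chat_template(conversation, tokenize=False)

# With the flag, the template appends the assistant header, cueing the model
# to begin a fresh assistant reply -- the behavior this commit fixes.
with_prompt = tokenizer.apply_chat_template(
    conversation, tokenize=False, add_generation_prompt=True
)

print(without_prompt)
print(with_prompt)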