Spaces:
Sleeping
Sleeping
token size
Browse files
vecalign/plan2align.py +2 -2
vecalign/plan2align.py
CHANGED
@@ -310,7 +310,7 @@ def translate_with_deepinfra(model, tokenizer, device, source_sentence, buffer,
|
|
310 |
inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
|
311 |
outputs = model.generate(
|
312 |
**inputs,
|
313 |
-
max_new_tokens=
|
314 |
temperature=0.7,
|
315 |
top_p=0.9,
|
316 |
do_sample=True
|
@@ -407,7 +407,7 @@ def final_translate_with_deepinfra(model, tokenizer, device, source_sentence, so
|
|
407 |
inputs = tokenizer(rewrite_prompt, return_tensors="pt").to(device)
|
408 |
outputs = model.generate(
|
409 |
**inputs,
|
410 |
-
max_new_tokens=
|
411 |
temperature=0.7,
|
412 |
top_p=0.9,
|
413 |
do_sample=True
|
|
|
310 |
inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
|
311 |
outputs = model.generate(
|
312 |
**inputs,
|
313 |
+
max_new_tokens=2048,
|
314 |
temperature=0.7,
|
315 |
top_p=0.9,
|
316 |
do_sample=True
|
|
|
407 |
inputs = tokenizer(rewrite_prompt, return_tensors="pt").to(device)
|
408 |
outputs = model.generate(
|
409 |
**inputs,
|
410 |
+
max_new_tokens=2048,
|
411 |
temperature=0.7,
|
412 |
top_p=0.9,
|
413 |
do_sample=True
|