Update app.py
app.py CHANGED
@@ -46,13 +46,13 @@ def generate_responses(prompt, history):
     # Custom sampler task: loop over generator and collect outputs in a list
     async def custom_sampler_task():
         generated_list = []
-        generator = creative_sampler.generate(wrapped_prompt, max_length=2048, temperature=
+        generator = creative_sampler.generate(wrapped_prompt, max_length=2048, temperature=1)
         for token in generator:
             generated_list.append(token)
         return tokenizer.decode(generated_list, skip_special_tokens=True)

     custom_output = asyncio.run(custom_sampler_task())
-    standard_output = model1.generate(inputs, max_length=2048, temperature=
+    standard_output = model1.generate(inputs, max_length=2048, temperature=1)
     # Decode standard output and remove the prompt from the generated response
     standard_response = tokenizer.decode(standard_output[0][len(inputs[0]):], skip_special_tokens=True)
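For context, a minimal self-contained sketch of the pattern the fixed lines implement: an async task that drains a token generator into a list and decodes it once, plus the standard generate-and-decode path with the now-completed temperature argument. The real app.py defines creative_sampler, model1, tokenizer, wrapped_prompt, and inputs elsewhere; the gpt2 checkpoint, the creative_sampler_generate stand-in, do_sample=True, and the smaller max_length below are illustrative assumptions, not the Space's actual configuration.

import asyncio
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the Space loads its own models elsewhere in app.py.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model1 = AutoModelForCausalLM.from_pretrained("gpt2")

wrapped_prompt = "Once upon a time"
inputs = tokenizer(wrapped_prompt, return_tensors="pt").input_ids

def creative_sampler_generate(prompt_text, max_length, temperature):
    # Stand-in for creative_sampler.generate: yields token ids one at a time.
    ids = tokenizer(prompt_text, return_tensors="pt").input_ids
    output = model1.generate(ids, max_length=max_length, do_sample=True,
                             temperature=temperature)
    yield from output[0].tolist()

async def custom_sampler_task():
    # Same shape as the diff: collect the generator's tokens, then decode once.
    generated_list = []
    generator = creative_sampler_generate(wrapped_prompt, max_length=128,
                                          temperature=1.0)  # app.py uses 2048
    for token in generator:
        generated_list.append(token)
    return tokenizer.decode(generated_list, skip_special_tokens=True)

custom_output = asyncio.run(custom_sampler_task())

# Standard path with the completed argument; in transformers, temperature
# only takes effect when do_sample=True.
standard_output = model1.generate(inputs, max_length=128, do_sample=True,
                                  temperature=1.0)
# Slice off the prompt tokens so only the newly generated text is decoded.
standard_response = tokenizer.decode(standard_output[0][len(inputs[0]):],
                                     skip_special_tokens=True)
print(custom_output)
print(standard_response)

Note that before this commit both calls ended in a dangling "temperature=", which is a Python syntax error; completing the argument as temperature=1 is what makes the two sampling paths comparable at the same temperature.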