Spaces: Running on Zero
Update app.py

app.py CHANGED
@@ -3,7 +3,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from backtrack_sampler import BacktrackSampler, CreativeWritingStrategy
 from backtrack_sampler.provider.transformers_provider import TransformersProvider
 import torch
-import asyncio
 import spaces
 
 description = """## Compare Creative Writing: Custom Sampler vs. Backtrack Sampler with Creative Writing Strategy
@@ -53,7 +52,7 @@ def generate_responses(prompt, history):
         generated_list.append(token)
         return tokenizer.decode(generated_list, skip_special_tokens=True)
     standard_output = standard_task()
-    custom_output =
+    custom_output = custom_sampler_task()
     # Decode standard output and remove the prompt from the generated response
     standard_response = tokenizer.decode(standard_output[0][len(inputs[0]):], skip_special_tokens=True)
 
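Since the diff shows only fragments of generate_responses, here is a minimal sketch of how the pieces plausibly fit together once this commit is applied: custom_output is now produced by a direct synchronous call to custom_sampler_task(), which presumably made the asyncio import unnecessary. The model name, generation parameters, the body of standard_task, and the exact backtrack_sampler signatures (TransformersProvider, CreativeWritingStrategy, sampler.generate) are assumptions inferred from the imports and context lines, not the Space's actual code.

# Hypothetical reconstruction of the code around this hunk; anything not
# visible in the diff above is an assumption, not the Space's real code.
import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
from backtrack_sampler import BacktrackSampler, CreativeWritingStrategy
from backtrack_sampler.provider.transformers_provider import TransformersProvider

model_name = "unsloth/Llama-3.2-1B-Instruct"  # assumed; the diff never names the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
device = torch.device("cuda")

# Wire the model into backtrack_sampler via its transformers provider;
# constructor arguments are assumed from the library's documented usage.
provider = TransformersProvider(model, tokenizer, device)
sampler = BacktrackSampler(provider, CreativeWritingStrategy(provider))

@spaces.GPU  # ZeroGPU allocation, consistent with the "Running on Zero" badge
def generate_responses(prompt, history):
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)

    def standard_task():
        # Plain transformers sampling for the baseline column (assumed body).
        return model.generate(inputs, max_new_tokens=256, do_sample=True)

    def custom_sampler_task():
        # Collect tokens yielded by the backtrack sampler, then decode once;
        # the two context lines in the hunk are the tail of this function.
        generated_list = []
        for token in sampler.generate(prompt, max_new_tokens=256):
            generated_list.append(token)
        return tokenizer.decode(generated_list, skip_special_tokens=True)

    standard_output = standard_task()
    custom_output = custom_sampler_task()  # the line this commit completes
    # Decode standard output and remove the prompt from the generated response
    standard_response = tokenizer.decode(standard_output[0][len(inputs[0]):], skip_special_tokens=True)
    return standard_response, custom_output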