Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ from backtrack_sampler import BacktrackSampler, CreativeWritingStrategy
 from backtrack_sampler.provider.transformers_provider import TransformersProvider
 import torch
 import spaces
+import asyncio
 
 description = """## Compare Creative Writing: Custom Sampler vs. Backtrack Sampler with Creative Writing Strategy
 This is a demo of [Backtrack Sampler](https://github.com/Mihaiii/backtrack_sampler) using one of its algorithms named "Creative Writing Strategy".
@@ -41,8 +42,6 @@ def generate_responses(prompt, history):
 
     #already has special tokens
     inputs = tokenizer.encode(wrapped_prompt, add_special_tokens=False, return_tensors="pt").to("cuda")
-    def standard_task():
-        return model1.generate(inputs, max_length=2048, temperature=0.7)
 
     # Custom sampler task: loop over generator and collect outputs in a list
     async def custom_sampler_task():
@@ -51,8 +50,9 @@ def generate_responses(prompt, history):
         for token in generator:
             generated_list.append(token)
         return tokenizer.decode(generated_list, skip_special_tokens=True)
-
-    custom_output = custom_sampler_task()
+
+    custom_output = asyncio.run(custom_sampler_task())
+    standard_output = model1.generate(inputs, max_length=2048, temperature=0.7)
     # Decode standard output and remove the prompt from the generated response
     standard_response = tokenizer.decode(standard_output[0][len(inputs[0]):], skip_special_tokens=True)
 
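The gist of the commit: the previous code called the async custom_sampler_task() bare, so custom_output held a coroutine object rather than the decoded text, and the unused standard_task() wrapper is replaced by a direct model1.generate(...) call whose result feeds the decode step. Below is a minimal sketch of the coroutine pattern the fix relies on; token_stream() is a hypothetical stand-in for the Space's sampler, not its actual code.

import asyncio

def token_stream():
    # Stand-in for the sampler's generator; the real Space streams token ids.
    yield from [101, 102, 103]

async def custom_sampler_task():
    generated_list = []
    for token in token_stream():
        generated_list.append(token)
    return generated_list

# Before the fix: calling the coroutine function bare returns a coroutine
# object (Python also warns "coroutine ... was never awaited"), so the
# result was never the collected tokens.
coro = custom_sampler_task()
print(type(coro))  # <class 'coroutine'>
coro.close()       # dispose of it to silence the never-awaited warning

# After the fix: asyncio.run drives the coroutine to completion and
# returns its value.
print(asyncio.run(custom_sampler_task()))  # [101, 102, 103]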