Try to produce output
app.py CHANGED
@@ -143,6 +143,7 @@ def diffusion_chat(question, eot_weight, max_it, sharpness):
    final_tokens = tokenizer.convert_ids_to_tokens(current_tokens[answer_start:])
    final_tokens = [tok for tok in final_tokens if tokenizer.convert_tokens_to_ids(tok) != eot_token_id]
    final_output = tokenizer.convert_tokens_to_string(final_tokens)
+   print(final_output)
    yield f"<b>Final Output (after {i+1} iterations):</b><br>" + final_output

    # --- Gradio Interface ---
@@ -161,6 +162,7 @@ demo = gr.Interface(
    ],
    outputs=gr.HTML(label="Diffusion Output"),
    title="Diffusion Language Model Chat",
+   theme="default",
    description="This interface runs a diffusion-based language model to generate answers progressively."
)
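For context, the complete gr.Interface call after this change would look roughly like the sketch below. The input components are not visible in the diff, so the Textbox and Slider widgets (and their ranges and defaults) are placeholders; only fn, outputs, title, theme, and description reflect the lines shown above.

import gradio as gr

# Sketch of the resulting Interface wiring. The input widgets below are
# assumptions, not part of the diff; diffusion_chat is the generator
# function defined earlier in app.py.
demo = gr.Interface(
    fn=diffusion_chat,
    inputs=[
        gr.Textbox(label="Question"),                      # placeholder
        gr.Slider(0.0, 1.0, value=0.4, label="eot_weight"),   # placeholder range
        gr.Slider(1, 64, value=16, step=1, label="max_it"),   # placeholder range
        gr.Slider(1.0, 20.0, value=5.0, label="sharpness"),   # placeholder range
    ],
    outputs=gr.HTML(label="Diffusion Output"),
    title="Diffusion Language Model Chat",
    theme="default",  # newly added line: pins the default Gradio theme
    description="This interface runs a diffusion-based language model to generate answers progressively."
)

if __name__ == "__main__":
    demo.launch()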