Change labeling to seconds from tokens
- app.py +1 -1
- audiocraft/models/musicgen.py +5 -3
app.py CHANGED
@@ -480,7 +480,7 @@ def ui(**kwargs):
                 dimension = gr.Slider(minimum=-2, maximum=2, value=2, step=1, label="Dimension", info="determines which direction to add new segements of audio. (1 = stack tracks, 2 = lengthen, -2..0 = ?)", interactive=True)
                 with gr.Row():
                     topk = gr.Number(label="Top-k", value=280, precision=0, interactive=True)
-                    topp = gr.Number(label="Top-p", value=1150, precision=0, interactive=True)
+                    topp = gr.Number(label="Top-p", value=1150, precision=0, interactive=True, info="overwrites Top-k if not zero")
                     temperature = gr.Number(label="Randomness Temperature", value=0.7, precision=None, interactive=True)
                     cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.5, precision=None, interactive=True)
                 with gr.Row():
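For context, the hint added to the Top-p field describes the usual precedence between the two sampling knobs in audiocraft-style samplers: when top_p is non-zero, nucleus (top-p) sampling is used and top_k is ignored. The sketch below only illustrates that precedence; the function name and tensor shapes are assumptions, not code from this repository.

import torch

def sample_next_token(logits: torch.Tensor, top_k: int, top_p: float,
                      temperature: float = 1.0) -> torch.Tensor:
    # Illustrative sketch, not repository code: a non-zero top_p takes
    # precedence over top_k, matching the new info= hint on the UI field.
    probs = torch.softmax(logits / temperature, dim=-1)
    if top_p > 0.0:
        # Nucleus sampling: keep the smallest set of tokens whose
        # cumulative probability reaches top_p, then renormalize.
        sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
        cumulative = torch.cumsum(sorted_probs, dim=-1)
        sorted_probs[cumulative - sorted_probs > top_p] = 0.0
        sorted_probs /= sorted_probs.sum(dim=-1, keepdim=True)
        choice = torch.multinomial(sorted_probs, num_samples=1)
        return sorted_idx.gather(-1, choice)
    if top_k > 0:
        # Top-k sampling: restrict to the k most likely tokens.
        topk_probs, topk_idx = torch.topk(probs, top_k, dim=-1)
        topk_probs /= topk_probs.sum(dim=-1, keepdim=True)
        choice = torch.multinomial(topk_probs, num_samples=1)
        return topk_idx.gather(-1, choice)
    return torch.multinomial(probs, num_samples=1)

With the defaults shown here (Top-p = 1150, Top-k = 280), the hint implies Top-k only takes effect once the Top-p field is set to 0.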
audiocraft/models/musicgen.py CHANGED
@@ -411,15 +411,17 @@ class MusicGen:

         def _progress_callback(generated_tokens: int, tokens_to_generate: int):
             generated_tokens += current_gen_offset
+            generated_tokens /= 50
+            tokens_to_generate /= 50
             if self._progress_callback is not None:
                 # Note that total_gen_len might be quite wrong depending on the
                 # codebook pattern used, but with delay it is almost accurate.
-                self._progress_callback((generated_tokens / …
+                self._progress_callback((generated_tokens / tokens_to_generate), f"Generated {generated_tokens}/{tokens_to_generate} seconds")
             if progress_callback is not None:
                 # Update Gradio progress bar
-                progress_callback((generated_tokens / …
+                progress_callback((generated_tokens / tokens_to_generate), f"Generated {generated_tokens}/{tokens_to_generate} seconds")
             if progress:
-                print(f'{generated_tokens: …
+                print(f'{generated_tokens: 6.2f} / {tokens_to_generate: 6.2f}', end='\r')

         if prompt_tokens is not None:
             assert max_prompt_len >= prompt_tokens.shape[-1], \
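The hard-coded 50 is MusicGen's token frame rate: its EnCodec compression model emits 50 token frames per second of audio, so dividing both counters by 50 turns token counts into seconds. Note that after the division the arguments are floats despite the int annotations, which is why the console print switches to the 6.2f format. Below is a hedged sketch of the same conversion factored into a reusable callback; the names are illustrative and not taken from this repository.

import gradio as gr

MUSICGEN_FRAME_RATE = 50  # token frames per second of generated audio

def make_progress_callback(progress: gr.Progress,
                           frame_rate: float = MUSICGEN_FRAME_RATE):
    """Wrap a Gradio progress bar so it reports seconds instead of tokens."""
    def _cb(generated_tokens: int, tokens_to_generate: int) -> None:
        generated_s = generated_tokens / frame_rate
        total_s = tokens_to_generate / frame_rate
        # Fraction for the bar, human-readable seconds for the label.
        progress(generated_s / max(total_s, 1e-9),
                 desc=f"Generated {generated_s:.1f}/{total_s:.1f} seconds")
    return _cb

If the loaded model is in scope at the call site, audiocraft's MusicGen exposes a frame_rate property that could replace the hard-coded constant.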