MohamedRashad committed
Commit 1fa4539 · 1 Parent(s): 640072b

Remove debug print statement and enhance FramePack description in app.py

Files changed (1):
  1. app.py +6 -6
app.py CHANGED
@@ -43,7 +43,6 @@ vae.enable_slicing()
 vae.enable_tiling()
 
 transformer.high_quality_fp32_output_for_inference = True
-print('transformer.high_quality_fp32_output_for_inference = True')
 
 transformer.to(dtype=torch.bfloat16)
 vae.to(dtype=torch.float16)
@@ -316,10 +315,8 @@ block = gr.Blocks(css=css).queue()
 with block:
     gr.Markdown('''
     # [FramePack](https://github.com/lllyasviel/FramePack)
-
-    ## Image to Video Animation Tool
-
-    FramePack transforms still images into smooth, natural-looking animations using AI. Upload a portrait or character image and describe the motion you want to see.
+
+    This implementation is based on the `demo_gradio.py` that [Lvmin Zhang](https://github.com/lllyasviel) provided
 
     ### How to use:
     1. **Upload an image** - Best results with clear, well-lit portraits
@@ -345,7 +342,10 @@ with block:
     use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
 
     n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False) # Not used
-    seed = gr.Number(label="Seed", value=31337, precision=0)
+    with gr.Row():
+        seed = gr.Number(label="Seed", value=31337, precision=0, scale=4)
+        random_seed_button = gr.Button(value="🔁", variant="primary", scale=1)
+        random_seed_button.click(lambda: int(torch.randint(0, 2**32 - 1, (1,)).item()), inputs=[], outputs=seed, show_progress=False, queue=False)
 
     total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
     latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False) # Should not change
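For reference, the seed controls added in the last hunk can be exercised in isolation. The following is a minimal sketch of that pattern, a `gr.Number` seed field next to a 🔁 button that rerolls it; the standalone `demo` app and the `random_seed` helper are illustrative scaffolding, and only the widget arguments and the click wiring mirror the diff above.

```python
# Minimal sketch of the seed-reroll pattern added in this commit.
# Assumes `gradio` and `torch` are installed; the surrounding `demo` app and the
# `random_seed` helper are illustrative, not part of the FramePack app itself.
import gradio as gr
import torch


def random_seed() -> int:
    # Same draw as the lambda wired to the 🔁 button in the diff: a fresh 32-bit seed.
    return int(torch.randint(0, 2**32 - 1, (1,)).item())


with gr.Blocks() as demo:
    with gr.Row():
        seed = gr.Number(label="Seed", value=31337, precision=0, scale=4)
        random_seed_button = gr.Button(value="🔁", variant="primary", scale=1)

    # Clicking the button overwrites the Number field with a new random value;
    # progress display and queueing are disabled so the update is immediate.
    random_seed_button.click(random_seed, inputs=[], outputs=seed, show_progress=False, queue=False)

if __name__ == "__main__":
    demo.launch()
```

Dropping this into the existing `with block:` layout, as the commit does, only requires replacing the old single-line `seed = gr.Number(...)` with the `gr.Row()` group.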