QHL067 committed on
Commit 9c9cba4 · 1 Parent(s): 035c9fd
Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -277,7 +277,7 @@ def infer_tab2(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_infer
     default_interpolation = 3
     return infer(prompt1, prompt2, seed, randomize_seed, guidance_scale, num_inference_steps, default_interpolation, operation_mode)
 
-examples = [
+examples_1 = [
     ["A robot cooking dinner in the kitchen", "An orange cat wearing sunglasses on a ship"],
 ]
 
@@ -452,12 +452,12 @@ css = """
 # )
 
 with gr.Blocks(css=css) as demo:
+    gr.Markdown("# CrossFlow")
+    gr.Markdown("[CrossFlow](https://cross-flow.github.io/) directly transforms text representations into images for text-to-image generation, without the need for both the noise distribution and conditioning mechanism.")
+    gr.Markdown("This direct mapping enables meaningful 'Linear Interpolation' and 'Arithmetic Operations' in the text latent space, as demonstrated here.")
     with gr.Tabs():
         # --- Tab 1: Interpolation Mode (no operation_mode) ---
-        gr.Markdown("# CrossFlow")
-        gr.Markdown("[CrossFlow](https://cross-flow.github.io/) directly transforms text representations into images for text-to-image generation, without the need for both the noise distribution and conditioning mechanism.")
-        gr.Markdown("This direct mapping enables meaningful 'Linear Interpolation' and 'Arithmetic Operations' in the text latent space, as demonstrated here.")
-        with gr.Tab("## Linear Interpolation"):
+        with gr.Tab("[Linear Interpolation]"):
             gr.Markdown("This demo uses 256px images, 25 sampling steps (instead of 50), and 10 interpolations (instead of 50) to conserve GPU memory. For better results, see the original [code](https://github.com/qihao067/CrossFlow). (You may adjust them in Advanced Settings, but doing so may trigger OOM errors.)")
 
             prompt1_tab1 = gr.Text(placeholder="Prompt for first image", label="Prompt 1")
@@ -486,9 +486,11 @@ with gr.Blocks(css=css) as demo:
                 ],
                 outputs=[first_image_output_tab1, last_image_output_tab1, gif_output_tab1, seed_tab1]
             )
+
+            gr.Examples(examples=examples_1, inputs=[prompt1, prompt2])
 
         # --- Tab 2: Operation Mode (no num_of_interpolation) ---
-        with gr.Tab("## Arithmetic Operations"):
+        with gr.Tab("[Arithmetic Operations]"):
             gr.Markdown("This demo only supports addition or subtraction between two text latents ('Prompt_1 + Prompt_2' or 'Prompt_1 - Prompt_2'). For the other arithmetic operations, see the original [code](https://github.com/qihao067/CrossFlow).")
 
             prompt1_tab2 = gr.Text(placeholder="Prompt for first image", label="Prompt 1")
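
In summary, the commit renames the example list to examples_1, moves the three introductory gr.Markdown calls out of the first tab and up to the gr.Blocks level so the header is shown above the tab bar on both tabs, switches the tab labels from "## Linear Interpolation" / "## Arithmetic Operations" to "[Linear Interpolation]" / "[Arithmetic Operations]" (tab labels are rendered as plain text, so the "##" would have appeared literally), and adds a gr.Examples row to the interpolation tab. The sketch below is a minimal, self-contained reconstruction of the resulting layout, not the real app.py: fake_infer is a placeholder for the app's infer()/infer_tab2() pipeline, most components are omitted, and gr.Examples is wired to the tab-1 textboxes (prompt1_tab1, prompt2_tab1) on the assumption that this is what the committed inputs=[prompt1, prompt2] refers to.

# Minimal sketch of the post-commit layout (illustration only; names marked
# below as placeholders are assumptions, not taken from the real app.py).
import gradio as gr

examples_1 = [
    ["A robot cooking dinner in the kitchen", "An orange cat wearing sunglasses on a ship"],
]

def fake_infer(prompt1, prompt2):
    # Placeholder for the real inference pipeline, which returns images/a GIF and a seed.
    return f"{prompt1}  |  {prompt2}"

with gr.Blocks() as demo:
    # Shared header now sits at the Blocks level, outside gr.Tabs(),
    # so it appears above the tab bar for both tabs.
    gr.Markdown("# CrossFlow")
    gr.Markdown("[CrossFlow](https://cross-flow.github.io/) maps text representations directly to images.")

    with gr.Tabs():
        with gr.Tab("[Linear Interpolation]"):
            prompt1_tab1 = gr.Text(placeholder="Prompt for first image", label="Prompt 1")
            prompt2_tab1 = gr.Text(placeholder="Prompt for second image", label="Prompt 2")
            result_tab1 = gr.Textbox(label="Result")
            gr.Button("Run").click(
                fake_infer,
                inputs=[prompt1_tab1, prompt2_tab1],
                outputs=result_tab1,
            )
            # Clickable example prompts for this tab; the commit passes
            # inputs=[prompt1, prompt2], assumed here to mean the tab-1 textboxes.
            gr.Examples(examples=examples_1, inputs=[prompt1_tab1, prompt2_tab1])

        with gr.Tab("[Arithmetic Operations]"):
            prompt1_tab2 = gr.Text(placeholder="Prompt for first image", label="Prompt 1")
            prompt2_tab2 = gr.Text(placeholder="Prompt for second image", label="Prompt 2")
            result_tab2 = gr.Textbox(label="Result")
            gr.Button("Run").click(
                fake_infer,
                inputs=[prompt1_tab2, prompt2_tab2],
                outputs=result_tab2,
            )

if __name__ == "__main__":
    demo.launch()

Running this sketch and clicking the example row should populate both prompt boxes on the interpolation tab, which mirrors what the committed gr.Examples line is meant to do in the full app.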