Commit 1b8c8fd
Parent(s): 0d19ec9

debug the generated process

Files changed:
- app.py (+2 -13)
- pyramid_dit/pyramid_dit_for_video_gen_pipeline.py (+6 -0)
app.py  CHANGED
@@ -100,9 +100,7 @@ def generate_video(prompt, image=None, duration=3, guidance_scale=9, video_guida
 
 # Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("#
-    gr.Markdown("Pyramid Flow is a training-efficient Autoregressive Video Generation model based on Flow Matching. It is trained only on open-source datasets within 20.7k A100 GPU hours")
-    gr.Markdown("[[Paper](https://arxiv.org/pdf/2410.05954)], [[Model](https://huggingface.co/rain1011/pyramid-flow-sd3)], [[Code](https://github.com/jy0205/Pyramid-Flow)]")
+    gr.Markdown("# R1")
 
     with gr.Row():
         with gr.Column():
@@ -117,16 +115,7 @@ with gr.Blocks() as demo:
             t2v_generate_btn = gr.Button("Generate Video")
         with gr.Column():
             t2v_output = gr.Video(label=f"Generated Video")
-
-            <div style="display: flex; flex-direction: column;justify-content: center; align-items: center; text-align: center;">
-                <p style="display: flex;gap: 6px;">
-                    <a href="https://huggingface.co/spaces/Pyramid-Flow/pyramid-flow?duplicate=true">
-                        <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg.svg" alt="Duplicate this Space">
-                    </a>
-                </p>
-                <p>to use privately and generate videos up to 10s at 24fps</p>
-            </div>
-            """)
+
     gr.Examples(
         examples=[
             "A movie trailer featuring the adventures of the 30 year old space man wearing a red wool knitted motorcycle helmet, blue sky, salt desert, cinematic style, shot on 35mm film, vivid colors",
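Net effect in app.py: the three-line Pyramid Flow intro and the "Duplicate this Space" HTML block are dropped, leaving a single gr.Markdown("# R1") header. Below is a minimal sketch of the resulting layout, not the actual file: the handler stub, the prompt textbox, and the click wiring are assumptions, since only the header, button, video output, and examples appear in the visible diff context.

import gradio as gr

def generate_video(prompt, image=None, duration=3, guidance_scale=9, **kwargs):
    """Stub standing in for the Space's real handler (full signature appears in the hunk header)."""
    return None  # the real handler returns a path to the generated video

with gr.Blocks() as demo:
    gr.Markdown("# R1")  # single header line; description and paper/model/code links are gone

    with gr.Row():
        with gr.Column():
            t2v_prompt = gr.Textbox(label="Prompt")         # assumed input component, not shown in the diff
            t2v_generate_btn = gr.Button("Generate Video")
        with gr.Column():
            t2v_output = gr.Video(label="Generated Video")  # the HTML badge block that followed it is removed

    t2v_generate_btn.click(generate_video, inputs=[t2v_prompt], outputs=[t2v_output])  # assumed wiring

    gr.Examples(
        examples=["A movie trailer featuring the adventures of the 30 year old space man ..."],
        inputs=[t2v_prompt],
    )

if __name__ == "__main__":
    demo.launch()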
pyramid_dit/pyramid_dit_for_video_gen_pipeline.py  CHANGED
@@ -560,6 +560,12 @@ class PyramidDiTForVideoGeneration:
         else:
             # prepare the condition latents
             past_condition_latents = []
+            # if not generated_latents_list or len(generated_latents_list) == 0:
+            #     raise ValueError("No latent tensors generated - check previous steps ")
+
+            print(f"Number of generated latents: {len(generated_latents_list)}")
+            print(f"Shapes of generated latents: {[x.shape for x in generated_latents_list]}")
+
             clean_latents_list = self.get_pyramid_latent(torch.cat(generated_latents_list, dim=2), len(stages) - 1)
 
             for i_s in range(len(stages)):
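The added lines only print diagnostics about generated_latents_list before the torch.cat along dim=2; the guard that would raise on an empty list is left commented out. A self-contained sketch of the same pattern follows, with the guard enabled for illustration and made-up latent shapes that stand in for whatever the pipeline actually produces.

import torch

def concat_generated_latents(generated_latents_list):
    """Stand-alone version of the diagnostics added in this commit: report how many
    latent tensors exist and their shapes before concatenating along the frame axis."""
    # The guard is commented out in the actual diff; shown enabled here for illustration.
    if not generated_latents_list:
        raise ValueError("No latent tensors generated - check previous steps")

    print(f"Number of generated latents: {len(generated_latents_list)}")
    print(f"Shapes of generated latents: {[x.shape for x in generated_latents_list]}")

    # torch.cat(dim=2) requires all other dimensions to match, so any mismatch in the
    # printed shapes points directly at the stage that produced a bad latent.
    return torch.cat(generated_latents_list, dim=2)

# Dummy latents shaped [batch, channels, frames, height, width]; real shapes depend on the model.
latents = [torch.randn(1, 16, 1, 32, 32), torch.randn(1, 16, 8, 32, 32)]
print(concat_generated_latents(latents).shape)  # torch.Size([1, 16, 9, 32, 32])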