Update app.py
app.py CHANGED
@@ -1,15 +1,16 @@
-import gradio as gr
 from diffusers import StableDiffusionPipeline
 import ffmpeg
 import os
+import gradio as gr
 
-# Load the Stable Diffusion model
-model = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-
+# Load the Stable Diffusion 3.5 Medium model
+model = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium")
+model.to("cuda") # Use GPU for faster processing (if available)
 
 def generate_video(prompt):
     # Generate frames
     frames = []
-    for i in range(5): #
+    for i in range(5): # Adjust the number of frames for your video
         image = model(prompt).images[0]
         frame_path = f"frame_{i}.png"
         image.save(frame_path)
@@ -19,16 +20,16 @@ def generate_video(prompt):
     output_video = "output.mp4"
     (
         ffmpeg
-        .input("frame_%d.png", framerate=1) # Adjust framerate
+        .input("frame_%d.png", framerate=1) # Adjust framerate
         .output(output_video)
         .run(overwrite_output=True)
     )
 
-    # Clean up
+    # Clean up frames
     for frame in frames:
         os.remove(frame)
 
-    return output_video #
+    return output_video # Path to the generated video
 
 # Gradio interface
 with gr.Blocks() as demo:
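
The diff stops at the opening of the Gradio Blocks context, so the interface wiring itself is not shown. Below is a minimal sketch of how the remaining lines presumably connect generate_video to the UI; the component labels and variable names here are assumptions, not part of the commit, though gr.Textbox, gr.Video, gr.Button and Button.click are standard Gradio APIs.

# Sketch only: assumed continuation of the Blocks context (not taken from the commit)
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")        # text prompt typed by the user
    video = gr.Video(label="Generated video")  # displays the returned output.mp4
    generate = gr.Button("Generate")
    # Run generate_video on click and render the returned file path as a video
    generate.click(fn=generate_video, inputs=prompt, outputs=video)

demo.launch()

Because generate_video returns the path to output.mp4, a gr.Video output component can render it directly; adjusting the framerate passed to ffmpeg.input changes how long each generated frame is held in the resulting clip.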