File size: 1,324 Bytes
0f2dddc ef1162a 6238b1d 0f2dddc 6238b1d e2436ee ef1162a 0f2dddc 6238b1d 0f2dddc ef1162a 0f2dddc 6238b1d 0f2dddc e2436ee 6238b1d 0f2dddc e2436ee 6238b1d ef1162a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import os

import ffmpeg
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
# Load the Stable Diffusion 3.5 Medium text-to-image pipeline once at startup.
model = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium")
# Use the GPU only when one is actually available. The original called
# .to("cuda") unconditionally, which raises on CPU-only machines even though
# its comment promised "if available".
model.to("cuda" if torch.cuda.is_available() else "cpu")
def generate_video(prompt, num_frames=5, framerate=1):
    """Generate a short video from a text prompt.

    Renders ``num_frames`` independent Stable Diffusion images for the same
    prompt, stitches them into an MP4 with ffmpeg, then deletes the
    intermediate frame files.

    Args:
        prompt: Text description passed to the diffusion pipeline.
        num_frames: Number of frames to render (default 5, the original
            hard-coded count).
        framerate: Frames per second of the output video (default 1, the
            original hard-coded rate).

    Returns:
        Path to the generated ``output.mp4``.
    """
    output_video = "output.mp4"
    frames = []
    try:
        # Each frame is an independent sample for the same prompt; the model
        # is the module-level pipeline loaded at import time.
        for i in range(num_frames):
            image = model(prompt).images[0]
            frame_path = f"frame_{i}.png"
            image.save(frame_path)
            frames.append(frame_path)
        # Combine the numbered frames into a video.
        (
            ffmpeg
            .input("frame_%d.png", framerate=framerate)
            .output(output_video)
            .run(overwrite_output=True)
        )
    finally:
        # Clean up intermediate frames even if generation or ffmpeg fails;
        # the original leaked the PNGs on any exception.
        for frame in frames:
            if os.path.exists(frame):
                os.remove(frame)
    return output_video
# Gradio front end: a prompt box, a generate button, and a downloadable file.
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Generator")
    prompt_box = gr.Textbox(label="Enter your video prompt", placeholder="Type something creative...")
    video_file = gr.File(label="Download Your Video")
    run_button = gr.Button("Generate Video")
    # Wire the button to the generator; the returned path feeds the File widget.
    run_button.click(fn=generate_video, inputs=prompt_box, outputs=video_file)
demo.launch()