import os

import ffmpeg
import gradio as gr
import torch
from diffusers import StableDiffusion3Pipeline

# Load the Stable Diffusion 3.5 Medium model
# (SD 3.5 uses the StableDiffusion3Pipeline class in diffusers)
model = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-medium",
    torch_dtype=torch.bfloat16,  # half-precision weights to reduce memory use
)
# Use the GPU for faster processing if one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)


def generate_video(prompt):
    # Generate frames from the prompt
    frames = []
    for i in range(5):  # Adjust the number of frames for your video
        image = model(prompt).images[0]
        frame_path = f"frame_{i}.png"
        image.save(frame_path)
        frames.append(frame_path)

    # Combine the frames into a video with ffmpeg
    output_video = "output.mp4"
    (
        ffmpeg
        .input("frame_%d.png", framerate=1)  # Adjust the framerate as needed
        .output(output_video, pix_fmt="yuv420p")  # yuv420p keeps the MP4 playable in most players
        .run(overwrite_output=True)
    )

    # Clean up the intermediate frame files
    for frame in frames:
        os.remove(frame)

    return output_video  # Path to the generated video


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Generator")
    prompt_input = gr.Textbox(label="Enter your video prompt", placeholder="Type something creative...")
    video_output = gr.File(label="Download Your Video")
    generate_button = gr.Button("Generate Video")
    generate_button.click(fn=generate_video, inputs=prompt_input, outputs=video_output)

demo.launch()