import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
from diffusers.utils import export_to_gif

# Load the motion adapter
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")

# Load the base model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
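# Note: on GPU hardware it is common to pass torch_dtype=torch.float16 to both
# from_pretrained() calls above to roughly halve memory use; the fp32 default
# used here also runs on CPU, just more slowly.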

# Set up the scheduler
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    steps_offset=1,
)
pipe.scheduler = scheduler

# Enable memory savings
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
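# (VAE slicing decodes the generated frames in smaller batches, and model CPU
# offload keeps each submodule on the GPU only while it runs; both trade a
# little speed for a lower peak-memory footprint.)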

def generate_animation(prompt):
    output = pipe(
        prompt=prompt,
        negative_prompt="bad quality, worse quality",
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=25,
        generator=torch.Generator("cpu").manual_seed(42),  # fixed seed for reproducible output
    )
    frames = output.frames[0]  # the first (and only) animation in the batch
    export_to_gif(frames, "animation.gif")
    return "animation.gif"

# Create the Gradio interface: a text prompt in, the rendered GIF out
demo = gr.Interface(fn=generate_animation, inputs="text", outputs="image")
demo.launch()
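
# To run this locally (a sketch, assuming the file is saved as app.py and that
# diffusers, transformers, accelerate, torch, and gradio are installed):
#   python app.py
# then open the printed local URL and enter a prompt such as
# "a rocket launching into space, cinematic lighting".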