Spaces:
Running
Running
New approach for the AnimateDiff model
Browse files
app.py
CHANGED
@@ -1,30 +1,36 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
-
from diffusers import AnimateDiffPipeline
|
4 |
-
from
|
5 |
-
import tempfile
|
6 |
-
import os
|
7 |
|
8 |
-
|
9 |
-
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
11 |
)
|
12 |
-
pipe.
|
13 |
|
14 |
-
|
15 |
-
|
|
|
16 |
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
)
|
|
|
28 |
|
|
|
|
|
29 |
demo.launch()
|
30 |
-
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
+
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
|
4 |
+
from diffusers.utils import export_to_gif
|
|
|
|
|
5 |
|
6 |
+
# Load the motion adapter
|
7 |
+
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
|
8 |
+
# Load the base model
|
9 |
+
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
|
10 |
+
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
|
11 |
+
# Set up the scheduler
|
12 |
+
scheduler = DDIMScheduler.from_pretrained(
|
13 |
+
model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
|
14 |
)
|
15 |
+
pipe.scheduler = scheduler
|
16 |
|
17 |
+
# Enable memory savings
|
18 |
+
pipe.enable_vae_slicing()
|
19 |
+
pipe.enable_model_cpu_offload()
|
20 |
|
21 |
+
def generate_animation(prompt, seed=42):
    """Generate a short GIF animation from a text prompt.

    Args:
        prompt: Text description of the desired animation.
        seed: RNG seed for reproducible output. Defaults to 42, matching
            the previously hard-coded value, so existing callers are
            unaffected.

    Returns:
        Filesystem path to the generated GIF (suitable for a Gradio
        image output).
    """
    import tempfile

    output = pipe(
        prompt=prompt,
        negative_prompt="bad quality, worse quality",
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=25,
        generator=torch.Generator("cpu").manual_seed(seed),
    )
    frames = output.frames[0]
    # Write each request to its own temp file: the previous fixed
    # "animation.gif" in the CWD was a race — two concurrent Gradio
    # requests would overwrite each other's output.
    gif_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
    export_to_gif(frames, gif_path)
    return gif_path
|
33 |
|
34 |
+
# Expose the pipeline through a minimal web UI: one text box in,
# the generated GIF out. (`demo` name kept — Spaces tooling expects it.)
demo = gr.Interface(
    fn=generate_animation,
    inputs="text",
    outputs="image",
)
demo.launch()
|
|