JoeS51 committed
Commit 51c8f6b · 1 Parent(s): eb0262e

new approach for animate diff model

Files changed (1)
  1. app.py +28 -22
app.py CHANGED
@@ -1,30 +1,36 @@
 import gradio as gr
 import torch
-from diffusers import AnimateDiffPipeline
-from PIL import Image
-import tempfile
-import os
+from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
+from diffusers.utils import export_to_gif
 
-pipe = AnimateDiffPipeline.from_pretrained(
-    "ByteDance/AnimateDiff-Lightning",
-    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+# Load the motion adapter
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+# Load the base model
+model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+# Set up the scheduler
+scheduler = DDIMScheduler.from_pretrained(
+    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
 )
-pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+pipe.scheduler = scheduler
 
-def generate_video(prompt):
-    frames = pipe(prompt).frames
+# Enable memory savings
+pipe.enable_vae_slicing()
+pipe.enable_model_cpu_offload()
 
-    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video:
-        pipe.export_to_video(frames, temp_video.name)
-        return temp_video.name
-
-demo = gr.Interface(
-    fn=generate_video,
-    inputs=gr.Textbox(label="Prompt"),
-    outputs=gr.Video(label="Generated Animation"),
-    title="AnimateDiff Demo",
-    description="Generate animations from text prompts using AnimateDiff."
-)
+def generate_animation(prompt):
+    output = pipe(
+        prompt=prompt,
+        negative_prompt="bad quality, worse quality",
+        num_frames=16,
+        guidance_scale=7.5,
+        num_inference_steps=25,
+        generator=torch.Generator("cpu").manual_seed(42),
+    )
+    frames = output.frames[0]
+    export_to_gif(frames, "animation.gif")
+    return "animation.gif"
 
+# Create Gradio interface
+demo = gr.Interface(fn=generate_animation, inputs="text", outputs="image")
 demo.launch()
-
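
Two side notes on the new version; the sketches below are illustrative only and are not part of this commit.

First, the old generate_video wrote each result to a tempfile.NamedTemporaryFile, while the new generate_animation always overwrites animation.gif in the working directory, so two simultaneous Gradio requests could clobber each other's output. A minimal variant that keeps the per-request temp file idea from the old code:

import tempfile

import torch
from diffusers.utils import export_to_gif

def generate_animation(prompt):
    output = pipe(
        prompt=prompt,
        negative_prompt="bad quality, worse quality",
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=25,
        generator=torch.Generator("cpu").manual_seed(42),
    )
    # Write to a unique temporary file so simultaneous requests do not
    # overwrite one another's GIF (the old app.py did the same for .mp4).
    with tempfile.NamedTemporaryFile(suffix=".gif", delete=False) as tmp:
        export_to_gif(output.frames[0], tmp.name)
    return tmp.name

Second, the new from_pretrained call drops the torch_dtype selection the old version made, so the checkpoint now loads in the default float32. enable_model_cpu_offload already trims peak memory, but on a GPU Space half precision roughly halves the weight footprint again; restoring the old guard could look like this (assuming the same adapter and model_id as above):

import torch

# Load in fp16 when CUDA is available, as the previous app.py did;
# fall back to fp32 on CPU.
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = AnimateDiffPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=dtype
)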