Ankit8544 committed on
Commit c8eea54 · verified · 1 Parent(s): e6e24b7

Update app.py

Files changed (1)
  1. app.py +51 -51
app.py CHANGED
@@ -1,51 +1,51 @@
- from flask import Flask, request, jsonify, send_file
- import torch
- from diffusers.utils import export_to_video
- from diffusers import AutoencoderKLWan, WanPipeline
- from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
- import os
- from uuid import uuid4
-
- app = Flask(__name__)
-
- # Load the model once at startup
- model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
- vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
- scheduler = UniPCMultistepScheduler(
-     prediction_type='flow_prediction',
-     use_flow_sigmas=True,
-     num_train_timesteps=1000,
-     flow_shift=5.0
- )
- pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
- pipe.scheduler = scheduler
- pipe.to("cuda")
-
- @app.route('/generate_video', methods=['POST'])
- def generate_video():
-     data = request.json
-     prompt = data.get('prompt')
-     negative_prompt = data.get('negative_prompt', '')
-     height = data.get('height', 720)
-     width = data.get('width', 1280)
-     num_frames = data.get('num_frames', 81)
-     guidance_scale = data.get('guidance_scale', 5.0)
-
-     output = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         height=height,
-         width=width,
-         num_frames=num_frames,
-         guidance_scale=guidance_scale,
-     ).frames[0]
-
-     output_filename = f"{uuid4()}.mp4"
-     output_path = os.path.join("outputs", output_filename)
-     os.makedirs("outputs", exist_ok=True)
-     export_to_video(output, output_path, fps=16)
-
-     return send_file(output_path, mimetype='video/mp4')
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=5000)
+ from flask import Flask, request, jsonify, send_file
+ import torch
+ from diffusers.utils import export_to_video
+ from diffusers import AutoencoderKLWan, WanPipeline
+ from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
+ import os
+ from uuid import uuid4
+
+ app = Flask(__name__)
+
+ # Load the model once at startup
+ model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
+ vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+ scheduler = UniPCMultistepScheduler(
+     prediction_type='flow_prediction',
+     use_flow_sigmas=True,
+     num_train_timesteps=1000,
+     flow_shift=5.0
+ )
+ pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
+ pipe.scheduler = scheduler
+ pipe.to("cuda")
+
+ @app.route('/generate_video', methods=['POST'])
+ def generate_video():
+     data = request.json
+     prompt = data.get('prompt')
+     negative_prompt = data.get('negative_prompt', '')
+     height = data.get('height', 720)
+     width = data.get('width', 1280)
+     num_frames = data.get('num_frames', 81)
+     guidance_scale = data.get('guidance_scale', 5.0)
+
+     output = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         height=height,
+         width=width,
+         num_frames=num_frames,
+         guidance_scale=guidance_scale,
+     ).frames[0]
+
+     output_filename = f"{uuid4()}.mp4"
+     output_path = os.path.join("outputs", output_filename)
+     os.makedirs("outputs", exist_ok=True)
+     export_to_video(output, output_path, fps=16)
+
+     return send_file(output_path, mimetype='video/mp4')
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=7860)
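
The only functional change in this commit is the Flask port, which moves from 5000 to 7860, the port a Hugging Face Space typically expects the app to listen on. As a minimal sketch of how the endpoint might be exercised after the change — assuming the app is reachable at http://localhost:7860 and using illustrative payload values — a client could POST a JSON body and save the returned MP4:

import requests

# Illustrative payload; keys mirror what generate_video() reads from request.json.
payload = {
    "prompt": "A cat walking through a neon-lit city at night",  # example prompt, not from the commit
    "negative_prompt": "",
    "height": 720,
    "width": 1280,
    "num_frames": 81,
    "guidance_scale": 5.0,
}

# Base URL is an assumption; point it at wherever this app is actually served.
resp = requests.post("http://localhost:7860/generate_video", json=payload, timeout=3600)
resp.raise_for_status()

# The endpoint returns the rendered MP4 via send_file, so the body is the video bytes.
with open("output.mp4", "wb") as f:
    f.write(resp.content)

Generating 81 frames at 1280x720 on a single GPU can take a while, hence the generous timeout in this sketch.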