Ankit8544 committed
Commit e5ca2d5 · verified · 1 Parent(s): 06f8a11

Update app.py

Files changed (1)
  1. app.py +110 -110
app.py CHANGED
@@ -1,110 +1,110 @@
- from flask import Flask, request, jsonify, send_from_directory
- from modelscope.pipelines import pipeline
- from modelscope.utils.constant import Tasks
- from diffusers import DiffusionPipeline
- import torch
- import imageio
- import uuid
- import os
-
- app = Flask(__name__)
-
- # Load models once
- modelscope_model = pipeline(Tasks.text_to_video_synthesis, model='damo-vilab/modelscope-text-to-video-synthesis')
- zeroscope_model = DiffusionPipeline.from_pretrained(
-     "cerspense/zeroscope_v2_576w",
-     torch_dtype=torch.float16
- ).to("cuda")
-
- # Output directory (for temporary files)
- os.makedirs("output", exist_ok=True)
-
- # Serve the generated video dynamically (no permanent storage)
- @app.route('/generate-video', methods=['POST'])
- def generate_video():
-     data = request.get_json()
-
-     model_type = data.get("model", "").lower()
-     if not model_type:
-         return jsonify({"error": "Model is required: 'modelscope' or 'zeroscope'"}), 400
-
-     params = data.get("params", {})
-     prompt = params.get("prompt")
-     if not prompt:
-         return jsonify({"error": "Prompt is required in 'params'"}), 400
-
-     # Common settings
-     seed = params.get("seed", 42)
-     output_filename = f"{uuid.uuid4().hex}.mp4"
-     output_path = os.path.join("output", output_filename)
-
-     try:
-         if model_type == "modelscope":
-             # Extract ModelScope-specific parameters
-             result = modelscope_model({
-                 "text": prompt,
-                 "output_path": output_path,
-                 "seed": seed,
-                 "fps": params.get("fps", 8),
-                 "num_frames": params.get("num_frames", 16),
-                 "batch_size": params.get("batch_size", 1),
-                 "decode_audio": params.get("decode_audio", False),
-                 "resolution": params.get("resolution", "512x512")
-             })
-             video_path = result["output_path"]
-
-         elif model_type == "zeroscope":
-             # Extract Zeroscope-specific parameters
-             inference_steps = params.get("num_inference_steps", 25)
-             height = params.get("height", 320)
-             width = params.get("width", 576)
-             fps = params.get("fps", 8)
-
-             generator = torch.manual_seed(seed)
-
-             with torch.autocast("cuda"):
-                 output = zeroscope_model(
-                     prompt,
-                     num_inference_steps=inference_steps,
-                     height=height,
-                     width=width,
-                     generator=generator
-                 )
-                 video_frames = output.frames
-
-             # Save as video
-             imageio.mimsave(output_path, video_frames, fps=fps)
-             video_path = output_path
-
-         else:
-             return jsonify({"error": f"Unsupported model type: {model_type}"}), 400
-
-         # Return a URL to the generated video file
-         video_url = f"/download-video/{output_filename}"
-
-         return jsonify({
-             "success": True,
-             "model": model_type,
-             "video_url": video_url,
-             "filename": output_filename
-         })
-
-     except Exception as e:
-         return jsonify({
-             "success": False,
-             "error": str(e)
-         }), 500
-
-
- # Endpoint to download the generated video
- @app.route('/download-video/<filename>', methods=['GET'])
- def download_video(filename):
-     try:
-         return send_from_directory("output", filename, as_attachment=True)
-     except FileNotFoundError:
-         return jsonify({"error": "Video file not found"}), 404
-
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 7860)))
-
+ from flask import Flask, request, jsonify, send_from_directory
+ from modelscope.pipelines import pipeline
+ from modelscope.utils.constant import Tasks
+ from diffusers import DiffusionPipeline
+ import torch
+ import imageio
+ import uuid
+ import os
+
+ app = Flask(__name__)
+
+ # Load models once
+ modelscope_model = pipeline(Tasks.text_to_video_synthesis, model='damo-vilab/modelscope-text-to-video-synthesis')
+ zeroscope_model = DiffusionPipeline.from_pretrained(
+     "cerspense/zeroscope_v2_576w",
+     torch_dtype=torch.float16
+ ).to("cuda")
+
+ # Output directory (for temporary files)
+ os.makedirs("output", exist_ok=True)
+
+ # Serve the generated video dynamically (no permanent storage)
+ @app.route('/generate-video', methods=['POST'])
+ def generate_video():
+     data = request.get_json()
+
+     model_type = data.get("model", "").lower()
+     if not model_type:
+         return jsonify({"error": "Model is required: 'modelscope' or 'zeroscope'"}), 400
+
+     params = data.get("params", {})
+     prompt = params.get("prompt")
+     if not prompt:
+         return jsonify({"error": "Prompt is required in 'params'"}), 400
+
+     # Common settings
+     seed = params.get("seed", 42)
+     output_filename = f"{uuid.uuid4().hex}.mp4"
+     output_path = os.path.join("output", output_filename)
+
+     try:
+         if model_type == "modelscope":
+             # Extract ModelScope-specific parameters
+             result = modelscope_model({
+                 "text": prompt,
+                 "output_path": output_path,
+                 "seed": seed,
+                 "fps": params.get("fps", 8),
+                 "num_frames": params.get("num_frames", 16),
+                 "batch_size": params.get("batch_size", 1),
+                 "decode_audio": params.get("decode_audio", False),
+                 "resolution": params.get("resolution", "512x512")
+             })
+             video_path = result["output_path"]
+
+         elif model_type == "zeroscope":
+             # Extract Zeroscope-specific parameters
+             inference_steps = params.get("num_inference_steps", 25)
+             height = params.get("height", 320)
+             width = params.get("width", 576)
+             fps = params.get("fps", 8)
+
+             generator = torch.manual_seed(seed)
+
+             with torch.autocast("cuda"):
+                 output = zeroscope_model(
+                     prompt,
+                     num_inference_steps=inference_steps,
+                     height=height,
+                     width=width,
+                     generator=generator
+                 )
+                 video_frames = output.frames
+
+             # Save as video
+             imageio.mimsave(output_path, video_frames, fps=fps)
+             video_path = output_path
+
+         else:
+             return jsonify({"error": f"Unsupported model type: {model_type}"}), 400
+
+         # Return a URL to the generated video file
+         video_url = f"/download-video/{output_filename}"
+
+         return jsonify({
+             "success": True,
+             "model": model_type,
+             "video_url": video_url,
+             "filename": output_filename
+         })
+
+     except Exception as e:
+         return jsonify({
+             "success": False,
+             "error": str(e)
+         }), 500
+
+
+ # Endpoint to download the generated video
+ @app.route('/download-video/<filename>', methods=['GET'])
+ def download_video(filename):
+     try:
+         return send_from_directory("output", filename, as_attachment=True)
+     except FileNotFoundError:
+         return jsonify({"error": "Video file not found"}), 404
+
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=5000)
+
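
For reference, below is a minimal client sketch for the two routes in this file. It is not part of the commit: it assumes the updated app is reachable at http://localhost:5000 (matching the new app.run line), and the prompt and parameter values are illustrative, mirroring the defaults read inside generate_video.

import requests

BASE_URL = "http://localhost:5000"  # assumption: app running locally on the new port

# Ask the Zeroscope branch of /generate-video for a clip
resp = requests.post(
    f"{BASE_URL}/generate-video",
    json={
        "model": "zeroscope",
        "params": {
            "prompt": "a corgi running on the beach",  # illustrative prompt
            "num_inference_steps": 25,
            "height": 320,
            "width": 576,
            "fps": 8,
            "seed": 42
        }
    },
    timeout=600  # synthesis can take minutes on a single GPU
)
resp.raise_for_status()
payload = resp.json()

# Fetch the file through the /download-video route returned in the response
if payload.get("success"):
    video = requests.get(f"{BASE_URL}{payload['video_url']}", timeout=60)
    video.raise_for_status()
    with open(payload["filename"], "wb") as f:
        f.write(video.content)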