# Wan2.1-API / app.py
from flask import Flask, request, jsonify, send_file
import torch
from diffusers.utils import export_to_video
from diffusers import AutoencoderKLWan, WanPipeline
from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
import os
from uuid import uuid4

app = Flask(__name__)

@app.route('/')
def index():
    return jsonify({"message": "Welcome to the Wan2.1 Video Generation API!", "status": "running"})

# Load the model once at startup
model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
scheduler = UniPCMultistepScheduler(
    prediction_type='flow_prediction',
    use_flow_sigmas=True,
    num_train_timesteps=1000,
    flow_shift=5.0,
)
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
pipe.scheduler = scheduler
# Move the pipeline to the GPU when one is available (the 1.3B model is far too slow on CPU).
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
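
# Optional (a sketch, not part of the original app): on GPUs with limited VRAM,
# diffusers' model CPU offload can be used instead of moving the whole pipeline
# to the device; submodels are kept on CPU and moved to the GPU only while they run:
#   pipe.enable_model_cpu_offload()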


@app.route('/generate_video', methods=['POST'])
def generate_video():
    data = request.get_json(force=True) or {}

    # Generation parameters, with defaults matching the Wan2.1 T2V examples.
    prompt = data.get('prompt')
    if not prompt:
        return jsonify({"error": "A 'prompt' field is required."}), 400
    negative_prompt = data.get('negative_prompt', '')
    height = data.get('height', 720)
    width = data.get('width', 1280)
    num_frames = data.get('num_frames', 81)
    guidance_scale = data.get('guidance_scale', 5.0)

    # Run the diffusion pipeline; frames[0] is the single generated clip.
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_frames=num_frames,
        guidance_scale=guidance_scale,
    ).frames[0]

    # Write the frames to an MP4 under ./outputs and stream it back to the client.
    os.makedirs("outputs", exist_ok=True)
    output_filename = f"{uuid4()}.mp4"
    output_path = os.path.join("outputs", output_filename)
    export_to_video(output, output_path, fps=16)
    return send_file(output_path, mimetype='video/mp4')


if __name__ == '__main__':
    # Port 7860 is the port Hugging Face Spaces expects the app to listen on.
    app.run(host='0.0.0.0', port=7860)
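
# Example client call (a sketch, not part of the app): POST a JSON body to
# /generate_video and save the returned MP4. Assumes the server is reachable
# at http://localhost:7860 and that the `requests` package is installed.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/generate_video",
#       json={"prompt": "A cat surfing a small wave at sunset", "num_frames": 81},
#       timeout=600,
#   )
#   resp.raise_for_status()
#   with open("wan_output.mp4", "wb") as f:
#       f.write(resp.content)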