# Wan2.1 / simple_app.py
import gradio as gr
import re
import subprocess
import select
from huggingface_hub import snapshot_download
# Download the model checkpoint (adjust repo_id/local_dir for other checkpoints as needed)
snapshot_download(
    repo_id="Wan-AI/Wan2.1-T2V-1.3B",
    local_dir="./Wan2.1-T2V-1.3B"
)
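# NOTE: the checkpoint is several GB; snapshot_download skips files that are
# already present in local_dir, so repeated launches normally reuse the earlier download.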
# Function to generate video
def infer(prompt, progress=gr.Progress(track_tqdm=True)):
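    """Run Wan2.1 text-to-video generation in a subprocess, stream its progress
    to the Gradio progress bar, and return the path of the saved video file."""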
    # Reduced progress output and simplified structure
    command = [
        "python", "-u", "-m", "generate",  # using unbuffered output
        "--task", "t2v-1.3B",
        "--size", "832*480",  # you can try reducing resolution further for CPU
        "--ckpt_dir", "./Wan2.1-T2V-1.3B",
        "--sample_shift", "8",
        "--sample_guide_scale", "6",
        "--prompt", prompt,
        "--save_file", "generated_video.mp4"
    ]
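    # Optional knob for slower hardware: the Wan2.1 CLI also exposes a sampling-step
    # count (assumed flag, verify with `python -m generate --help`), e.g.:
    #   command += ["--sample_steps", "30"]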
    # Run the model inference in a subprocess; merge stderr into stdout so a
    # chatty stderr stream cannot fill its (otherwise unread) pipe and stall the process
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        bufsize=1,
    )
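    # Example of a tqdm-style progress line emitted by the generation script
    # (assumed format): " 40%|████      | 20/50 [01:10<01:45,  3.50s/it]"
    # The regex below captures the percentage, the current step, and the total steps.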
    # Monitor progress with a minimal progress bar
    progress_pattern = re.compile(r"(\d+)%\|.*\| (\d+)/(\d+)")
    overall_steps = 0
    while True:
        rlist, _, _ = select.select([process.stdout], [], [], 0.04)
        if rlist:
            line = process.stdout.readline()
            if not line:
                break
            stripped_line = line.strip()
            if not stripped_line:
                continue
            # Check for video generation progress
            progress_match = progress_pattern.search(stripped_line)
            if progress_match:
                current = int(progress_match.group(2))
                total = int(progress_match.group(3))
                # Report denoising progress through the progress tracker injected by Gradio
                progress(current / total, desc="Generating video")
                continue
            # Process info messages (simplified)
            if "INFO:" in stripped_line:
                overall_steps += 1
                continue
            print(stripped_line)
        if process.poll() is not None:
            break
    # Clean up after the subprocess exits
    process.wait()
    # Return the video file path if successful
    if process.returncode == 0:
        return "generated_video.mp4"
    raise RuntimeError(f"Video generation failed with exit code {process.returncode}")
# Gradio UI
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# Wan 2.1 1.3B Video Generation")
        prompt = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Generate Video")
        video_res = gr.Video(label="Generated Video")

    submit_btn.click(
        fn=infer,
        inputs=[prompt],
        outputs=[video_res]
    )

demo.queue().launch(show_error=True, show_api=False)
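# queue() is used because a single generation can take a long time; queued
# requests are handled in turn rather than through one long-blocking HTTP call.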