# wan2.1-Diffuser / app.py
# NOTE: the lines below are scraped Hugging Face page chrome, kept as a
# comment so the file parses: rahul7star — commit "try anbiter way"
# (aab7d3b, verified) — raw / history / blame — 1.17 kB
import torch
import gradio as gr
from diffusers import DiffusionPipeline

# Model repository on the Hugging Face Hub (an nf4-quantized Wan2.1
# text-to-video checkpoint).
model_path = "sarthak247/Wan2.1-T2V-1.3B-nf4"  # Replace with your model path

# Wan2.1-T2V is a text-to-VIDEO model, not a Stable Diffusion image model,
# so let DiffusionPipeline auto-resolve the correct pipeline class from the
# repo's model_index.json instead of forcing StableDiffusionPipeline (which
# would mis-handle or fail to load the video components).
pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)

# Only move to GPU when one is actually present; the original unconditional
# .to("cuda") crashes on CPU-only hosts (e.g. a free Space).
if torch.cuda.is_available():
    pipe.to("cuda")
def generate_video(prompt):
    """Generate a video from *prompt* using the pre-loaded pipeline.

    Args:
        prompt: Text description of the desired video clip.

    Returns:
        Path to an ``.mp4`` file suitable for a ``gr.Video`` output, or
        ``None`` when generation fails (Gradio renders an empty player).
    """
    # Local import keeps this fix self-contained; export_to_video writes the
    # generated frames to a playable file, which is what gr.Video expects
    # (the original returned a raw tensor, which gr.Video cannot render).
    from diffusers.utils import export_to_video

    try:
        # Text-to-video pipelines expose generated clips on `.frames`;
        # the original `.videos` attribute does not exist on the output.
        frames = pipe(prompt).frames[0]
        out_path = "generated_video.mp4"
        export_to_video(frames, out_path)
        return out_path
    except Exception as e:
        # Log and return None rather than returning an error *string* to a
        # Video component, which would itself raise in the UI.
        print(f"Error during video generation: {e}")
        return None
# Gradio front-end: wire the text prompt box to the generator and expose
# the result through a video player.
prompt_box = gr.Textbox(label="Enter Text Prompt")
video_out = gr.Video(label="Generated Video")

iface = gr.Interface(
    fn=generate_video,
    inputs=prompt_box,
    outputs=video_out,
    title="Text-to-Video Generation with Wan2.1-T2V",
    description="This app generates a video based on the text prompt using the Wan2.1-T2V model.",
)

# Start the web server for the demo.
iface.launch()