import torch
import gradio as gr
from diffusers import WanPipeline  # Wan2.1 is a text-to-video model; recent diffusers releases load it via WanPipeline, not StableDiffusionPipeline
from diffusers.utils import export_to_video
# Load model manually from Hugging Face model hub or your uploaded files
model_path = "sarthak247/Wan2.1-T2V-1.3B-nf4" # Replace with your model path
pipe = WanPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda") # If running on GPU
def generate_video(prompt):
    """
    Generates a video from the provided prompt using the pre-loaded model.
    """
    try:
        # Generate frames with the model pipeline; the pipeline output exposes .frames
        frames = pipe(prompt).frames[0]
        # Encode the frames to an MP4 file so gr.Video can display it
        return export_to_video(frames, "output.mp4", fps=16)
    except Exception as e:
        print(f"Error during video generation: {e}")
        # Return None so the video output stays empty instead of receiving an invalid file path
        return None
# Gradio UI for video generation
iface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(label="Enter Text Prompt"),
    outputs=gr.Video(label="Generated Video"),
    title="Text-to-Video Generation with Wan2.1-T2V",
    description="This app generates a video based on the text prompt using the Wan2.1-T2V model.",
)
# Launch the Gradio app
iface.launch()
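
The repo name suggests an nf4 (4-bit bitsandbytes) quantized checkpoint. If the plain WanPipeline.from_pretrained call above cannot load it directly, the quantized transformer can be loaded explicitly and handed to the pipeline. The sketch below is only an assumption about the repo layout (a standard diffusers folder with a transformer subfolder), not something confirmed by the original app, and it assumes bitsandbytes is installed:

import torch
from diffusers import BitsAndBytesConfig, WanPipeline, WanTransformer3DModel

model_path = "sarthak247/Wan2.1-T2V-1.3B-nf4"  # assumed to use the standard diffusers folder layout

# nf4 quantization settings for the transformer weights (requires bitsandbytes)
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Load only the transformer in 4-bit, then build the rest of the pipeline around it
transformer = WanTransformer3DModel.from_pretrained(
    model_path,
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)
pipe = WanPipeline.from_pretrained(
    model_path,
    transformer=transformer,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()  # keeps GPU memory low and avoids moving the quantized model with .to()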