import streamlit as st
from diffusers import DiffusionPipeline
import torch
import os
import imageio
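
# Assumed runtime dependencies beyond the imports above: imageio-ffmpeg
# (needed by imageio to write .mp4 files) and transformers/accelerate,
# which diffusers typically uses when loading this text-to-video pipeline.
# These are assumptions about the environment, not pinned requirements.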
st.set_page_config(page_title="Gen-V", layout="centered")
st.title("🎥 Gen-V: Text-to-Video Generator")
st.write("Generate AI-powered videos from text prompts using open-source models!")
prompt = st.text_input("Enter a prompt", "cat wearing black goggles")
if st.button("🎬 Generate Video"):
    with st.spinner("Generating video... this might take a bit ⏳"):
        # Use half precision on GPU; fall back to full precision on CPU.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        dtype = torch.float16 if device == "cuda" else torch.float32

        pipe = DiffusionPipeline.from_pretrained(
            "damo-vilab/text-to-video-ms-1.7b",
            torch_dtype=dtype,
            variant="fp16" if device == "cuda" else None,
        ).to(device)

        # .frames holds the generated video frames as arrays; newer diffusers
        # releases nest them per batch, in which case .frames[0] is needed.
        video_frames = pipe(prompt).frames

        output_path = "genv_output.mp4"
        # Writing .mp4 via imageio requires the imageio-ffmpeg backend.
        imageio.mimsave(output_path, video_frames, fps=8)

    st.success("Video generation complete!")
    st.video(output_path)