"""Minimal text-to-"video" demo.

Generates several still frames from a text prompt via the Hugging Face
Inference API (Stable Diffusion 2.1) and stitches them into an MP4 that a
Gradio interface serves back to the user.
"""

import os

import gradio as gr
import requests

# Fix: `os` was used below (makedirs / listdir / path.join) but never imported.
# NOTE(review): never commit a real token — read it from the environment; the
# original placeholder is kept as the fallback so behavior is unchanged when
# the variable is unset.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "YOUR_HF_API_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}


def query(payload):
    """POST *payload* to the HF inference endpoint and return the raw bytes.

    Raises requests.HTTPError on a non-2xx response. Previously an error
    response's JSON body was returned and later written to disk as a .png,
    which made cv2.imread return None and crashed video assembly far from
    the real cause.
    """
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    return response.content


def generate_video(prompt):
    """Generate 10 frames for *prompt* and assemble them into an MP4.

    Returns the output file path ("generated_video.mp4") for Gradio's
    Video component.

    Note: each frame is an independent sample of the same prompt, so the
    result is a slideshow rather than a temporally coherent video.
    """
    os.makedirs("frames", exist_ok=True)
    for i in range(10):
        image_bytes = query({"inputs": prompt})
        # Zero-padded names keep lexicographic sort == frame order.
        with open(f"frames/frame_{i:03d}.png", "wb") as f:
            f.write(image_bytes)
    create_video_from_frames("frames", "generated_video.mp4", fps=2)
    return "generated_video.mp4"


def create_video_from_frames(frame_folder, output_path, fps=2):
    """Stitch every .png in *frame_folder* (sorted by name) into *output_path*.

    Raises ValueError if the folder contains no .png frames (the original
    crashed with an opaque IndexError in that case).
    """
    import cv2  # local import: cv2 is only needed for video assembly

    images = sorted(img for img in os.listdir(frame_folder) if img.endswith(".png"))
    if not images:
        raise ValueError(f"No .png frames found in {frame_folder!r}")

    # Dimensions of the first frame define the video size; all frames are
    # assumed to share them (same model, same prompt).
    first = cv2.imread(os.path.join(frame_folder, images[0]))
    height, width, _ = first.shape
    video = cv2.VideoWriter(
        output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
    )
    try:
        for img in images:
            video.write(cv2.imread(os.path.join(frame_folder, img)))
    finally:
        # Always release the writer so a partial file isn't left locked.
        video.release()


iface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(lines=3),
    outputs=gr.Video(),
    title="Text to Video AI",
)

if __name__ == "__main__":
    iface.launch()