import gradio as gr
import moviepy.editor as mp
import numpy as np
from PIL import Image
import sys

# Redirect stdout to stderr so print() output shows up in the Space's container logs
sys.stdout = sys.stderr
# Resize an image to fit inside target_size, preserving its aspect ratio and
# padding the leftover area with a solid letterbox color
def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
    width, height = img.size
    target_width, target_height = target_size
    aspect_ratio = width / height
    target_aspect_ratio = target_width / target_height
    if aspect_ratio > target_aspect_ratio:
        # Wider than the target: constrain width, pad top and bottom
        new_width = target_width
        new_height = int(new_width / aspect_ratio)
    else:
        # Taller than the target: constrain height, pad left and right
        new_height = target_height
        new_width = int(new_height * aspect_ratio)
    img_resized = img.resize((new_width, new_height))
    final_img = Image.new('RGB', target_size, padding_color)
    # Center the resized image on the padded canvas
    final_img.paste(img_resized, ((target_width - new_width) // 2, (target_height - new_height) // 2))
    return final_img
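
# Worked example of the letterbox math above (illustrative only, not part of
# the app): a 4000x2000 source has aspect ratio 2.0, wider than 16:9 (~1.78),
# so it is scaled to 1280x640 and padded with 40-pixel black bars top and bottom:
#
#     demo_img = resize_and_fit_image(Image.new('RGB', (4000, 2000), 'white'))
#     assert demo_img.size == (1280, 720)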

# Apply a gradual Ken Burns style zoom-in to an image clip
def apply_zoom_effect(image_clip):
    # moviepy 1.x resize accepts a time-dependent scale factor:
    # the image grows from 1.0x at 5% per second
    zoomed_clip = image_clip.resize(lambda t: 1 + 0.05 * t)
    return zoomed_clip
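
# Alternative effect (a sketch, not wired into the app): the same moviepy 1.x
# time-dependent resize can run the other way, starting zoomed in and easing
# back to 1.0 by the end of the clip. Assumes the clip's duration is already
# set, as it is in the pipeline below.
def apply_zoom_out_effect(image_clip, strength=0.05):
    duration = image_clip.duration
    # Scale decays linearly from (1 + strength) at t=0 to 1.0 at t=duration
    return image_clip.resize(lambda t: 1 + strength * (1 - t / duration))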

# Video generation function: one clip per image, evenly dividing the audio
def process_and_generate_video(audio_file, images):
    try:
        print("Starting video generation...")
        # Validate input files
        if not audio_file:
            raise ValueError("No audio file provided.")
        if not images or len(images) == 0:
            raise ValueError("No images provided.")

        # Load audio file
        print(f"Audio file path: {audio_file}")
        audio = mp.AudioFileClip(audio_file)
        audio_duration = audio.duration
        print(f"Audio duration: {audio_duration} seconds")

        # Process images: each image is shown for an equal share of the audio
        image_clips = []
        image_duration = audio_duration / len(images)
        for img_path in images:
            print(f"Processing image: {img_path}")
            img = Image.open(img_path)
            img = resize_and_fit_image(img, target_size=(1280, 720))
            # Create image clip with zoom effect
            img_clip = mp.ImageClip(np.array(img)).set_duration(image_duration).set_fps(30)
            img_clip = apply_zoom_effect(img_clip)
            image_clips.append(img_clip)
        print(f"Generated {len(image_clips)} image clips.")

        # Concatenate image clips and set audio
        video = mp.concatenate_videoclips(image_clips, method="compose")
        video = video.set_audio(audio)

        # Save video to temporary directory
        output_path = "/tmp/generated_video.mp4"
        print(f"Saving video to {output_path}...")
        video.write_videofile(output_path, codec="libx264", audio_codec="aac")
        return output_path, ""  # Two values: one for each Gradio output component
    except Exception as e:
        error_message = f"Error during video generation: {e}"
        print(error_message)  # Log error
        return None, error_message  # Leave the video output empty, surface the error
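
# Local smoke test (hypothetical file names; kept commented out so running
# this module on a Space only starts the Gradio app):
#
#     path, err = process_and_generate_video("track.mp3", ["one.jpg", "two.png"])
#     print(path or err)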

# Gradio interface setup
def gradio_interface():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                mp3_input = gr.Audio(type="filepath", label="Upload MP3")
                image_input = gr.File(type="filepath", file_types=[".jpg", ".png"], label="Upload Images", file_count="multiple")
                generate_button = gr.Button("Generate Video")
            output_video = gr.Video(label="Generated Video")
            error_output = gr.Textbox(label="Error Log", interactive=False)
        generate_button.click(
            fn=process_and_generate_video,
            inputs=[mp3_input, image_input],
            outputs=[output_video, error_output]
        )
    demo.launch()

# Run the Gradio interface
gradio_interface()
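
# Note on Spaces deployments: moviepy encoding can outlast the default HTTP
# request window, so enabling Gradio's request queue before launching may be
# worthwhile. A sketch (queue() is standard Gradio API; the max_size value is
# illustrative) that would replace demo.launch() inside gradio_interface():
#
#     demo.queue(max_size=8)
#     demo.launch()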