# Gradio app: generate a slideshow video from an MP3 soundtrack and a set of images.
import gradio as gr
import moviepy.editor as mp
import numpy as np
from PIL import Image
import os
import sys
# Redirect stdout into stderr so print() output is interleaved with error
# output and not lost to buffering/capture by the host process
# (NOTE(review): presumably for hosted-platform log visibility — confirm).
sys.stdout = sys.stderr
# Resize image while maintaining aspect ratio
def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
    """Letterbox *img* into *target_size*, preserving its aspect ratio.

    The image is scaled to the largest size that fits inside *target_size*
    and centered on a solid *padding_color* canvas.

    Args:
        img: source PIL.Image.Image.
        target_size: (width, height) of the output canvas.
        padding_color: RGB fill for the letterbox bars.

    Returns:
        A new RGB PIL.Image.Image of exactly *target_size*.
    """
    width, height = img.size
    target_width, target_height = target_size
    aspect_ratio = width / height
    target_aspect_ratio = target_width / target_height
    if aspect_ratio > target_aspect_ratio:
        # Wider than the target: width is the limiting dimension.
        new_width = target_width
        # max(1, ...) guards extreme aspect ratios from producing a
        # zero-pixel dimension, which resize() rejects.
        new_height = max(1, int(new_width / aspect_ratio))
    else:
        # Taller than (or equal to) the target: height limits.
        new_height = target_height
        new_width = max(1, int(new_height * aspect_ratio))
    # Convert to RGB so palette/alpha sources paste cleanly onto the RGB
    # canvas; LANCZOS gives far better scaling quality than the default filter.
    img_resized = img.convert('RGB').resize((new_width, new_height), Image.LANCZOS)
    final_img = Image.new('RGB', target_size, padding_color)
    final_img.paste(img_resized, ((target_width - new_width) // 2, (target_height - new_height) // 2))
    return final_img
# Apply zoom effect to image clip
def apply_zoom_effect(image_clip):
    """Return *image_clip* with a slow, continuous zoom-in applied.

    The scale factor grows linearly from 1.0 at 5% per second of clip
    time, giving a gentle Ken Burns-style motion.
    """
    def zoom_factor(t):
        return 1 + 0.05 * t

    return image_clip.resize(zoom_factor)
# Video generation function
def process_and_generate_video(audio_file, images):
    """Build a slideshow video from *images* timed to *audio_file*.

    Each image is letterboxed to 1280x720, given a slow zoom, and shown
    for an equal share of the audio's duration; the clips are then
    concatenated and muxed with the soundtrack.

    Args:
        audio_file: path to the soundtrack file, or falsy if missing.
        images: sequence of image file paths, or falsy if missing.

    Returns:
        (video_path, error_message) — exactly one of the two is set.
        A two-element return is required because the Gradio click handler
        declares two outputs (the video player and the error textbox);
        returning a single value would leave them out of sync.
    """
    try:
        print("Starting video generation...")
        # Validate input files before touching any media libraries.
        if not audio_file:
            raise ValueError("No audio file provided.")
        if not images:
            raise ValueError("No images provided.")
        # Load audio file
        print(f"Audio file path: {audio_file}")
        audio = mp.AudioFileClip(audio_file)
        audio_duration = audio.duration
        print(f"Audio duration: {audio_duration} seconds")
        # Each image gets an equal slice of the soundtrack.
        image_clips = []
        image_duration = audio_duration / len(images)
        for img_path in images:
            print(f"Processing image: {img_path}")
            # Close the file handle promptly; pixel data is copied into
            # the clip as a numpy array, so the handle isn't needed after.
            with Image.open(img_path) as img:
                fitted = resize_and_fit_image(img, target_size=(1280, 720))
                img_clip = mp.ImageClip(np.array(fitted)).set_duration(image_duration).set_fps(30)
            img_clip = apply_zoom_effect(img_clip)
            image_clips.append(img_clip)
        print(f"Generated {len(image_clips)} image clips.")
        # Concatenate image clips and set audio
        video = mp.concatenate_videoclips(image_clips, method="compose")
        video = video.set_audio(audio)
        # Save video to temporary directory
        output_path = "/tmp/generated_video.mp4"
        print(f"Saving video to {output_path}...")
        video.write_videofile(output_path, codec="libx264", audio_codec="aac")
        return output_path, ""
    except Exception as e:
        error_message = f"Error during video generation: {e}"
        print(error_message)  # Log error
        # Clear the video slot and surface the error in the textbox.
        return None, error_message
# Gradio interface setup
def gradio_interface():
    """Assemble and launch the Gradio UI for the video generator."""
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                # Inputs: one soundtrack plus any number of still images.
                audio_in = gr.Audio(type="filepath", label="Upload MP3")
                images_in = gr.File(
                    type="filepath",
                    file_types=[".jpg", ".png"],
                    label="Upload Images",
                    file_count="multiple",
                )
                run_btn = gr.Button("Generate Video")
                # Outputs: the rendered video and a read-only error log.
                video_out = gr.Video(label="Generated Video")
                errors_out = gr.Textbox(label="Error Log", interactive=False)
        run_btn.click(
            fn=process_and_generate_video,
            inputs=[audio_in, images_in],
            outputs=[video_out, errors_out],
        )
    demo.launch()
# Run the Gradio interface
if __name__ == "__main__":
    # Guard the launch so importing this module doesn't start a server.
    gradio_interface()