# Import necessary libraries (assuming all your imports remain the same)
import gradio as gr
import os
import tempfile
import shutil
import random
from moviepy.editor import (concatenate_videoclips, CompositeVideoClip,
                            CompositeAudioClip, AudioFileClip, TextClip)
from moviepy.audio.fx.all import audio_loop
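# NOTE: this script targets the moviepy 1.x API (the moviepy.editor module and the
# volumex/subclip/set_audio methods plus the audio_loop fx used below); moviepy 2.x
# renamed or removed several of these.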
# Your existing helper functions (generate_script, parse_script, etc.) remain unchanged
# Ensure TEMP_FOLDER, TARGET_RESOLUTION, and CAPTION_COLOR are set within functions as needed
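# Based on how they are called below, the helpers are expected to behave roughly as:
#   generate_script(topic) -> str (raw script text)
#   parse_script(raw_script) -> list of dicts alternating media elements ('prompt' key)
#       and TTS elements ('text' key)
#   generate_media(prompt, current_index, total_segments) -> {'path': ..., 'asset_type': 'video'|'image'}
#   generate_tts(text, lang) -> path to the generated audio file
#   create_clip(media_path, asset_type, tts_path, duration, effects, narration_text, segment_index) -> VideoClip
#   apply_kenburns_effect(clip, resolution) -> VideoClip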
# Define maximum number of clips to handle in the UI
MAX_CLIPS = 10
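# process_script returns one value per output component wired up in the
# generate_button.click handler below: the raw script, the clip count, and then
# visibility/value updates for every accordion, prompt box, narration box and
# file upload (MAX_CLIPS of each, in that order).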
def process_script(topic, script_input):
    """Process the topic or script and return flat updates for the UI."""
    if script_input.strip():
        raw_script = script_input
    else:
        raw_script = generate_script(topic)
    if not raw_script:
        return ("Failed to generate script", 0,
                *([gr.update(visible=False)] * MAX_CLIPS),
                *([gr.update(value="")] * MAX_CLIPS),
                *([gr.update(value="")] * MAX_CLIPS),
                *([gr.update(value=None)] * MAX_CLIPS))
    elements = parse_script(raw_script)
    paired_elements = [(elements[i], elements[i + 1]) for i in range(0, len(elements) - 1, 2)]
    num_clips = min(len(paired_elements), MAX_CLIPS)
    # Prepare updates for clip editor
    accordion_updates = []
    prompt_updates = []
    narration_updates = []
    media_updates = []
    for i in range(MAX_CLIPS):
        if i < num_clips:
            media_elem, tts_elem = paired_elements[i]
            accordion_updates.append(gr.update(visible=True, label=f"Clip {i+1}: {media_elem['prompt'][:20]}..."))
            prompt_updates.append(gr.update(value=media_elem['prompt']))
            narration_updates.append(gr.update(value=tts_elem['text']))
            media_updates.append(gr.update(value=None))  # Reset file upload
        else:
            accordion_updates.append(gr.update(visible=False))
            prompt_updates.append(gr.update(value=""))
            narration_updates.append(gr.update(value=""))
            media_updates.append(gr.update(value=None))
    return (raw_script, num_clips,
            *accordion_updates, *prompt_updates, *narration_updates, *media_updates)
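# generate_video_full receives the per-clip editor values as a flat *clip_inputs
# sequence of (visual_prompt, narration, custom_media) triples, because Gradio
# passes each input component's value as a separate positional argument.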
def generate_video_full(resolution, render_speed, video_clip_percent, zoom_pan_effect,
                        bgm_upload, bgm_volume, subtitles_enabled, font, font_size,
                        outline_width, font_color, outline_color, position, num_clips,
                        *clip_inputs):
    """Generate the video using all settings and edited clip data."""
    global TARGET_RESOLUTION, CAPTION_COLOR, TEMP_FOLDER
    # Set resolution
    TARGET_RESOLUTION = (1080, 1920) if resolution == "Short (1080x1920)" else (1920, 1080)
    # Set caption settings
    CAPTION_COLOR = font_color if subtitles_enabled else "transparent"
    # Create temporary folder
    TEMP_FOLDER = tempfile.mkdtemp()
    # Parse clip inputs (visual_prompt, narration, custom_media for each clip)
    clips_data = []
    for i in range(num_clips):
        idx = i * 3
        visual_prompt = clip_inputs[idx]
        narration = clip_inputs[idx + 1]
        custom_media = clip_inputs[idx + 2]
        clips_data.append({
            'visual_prompt': visual_prompt,
            'narration': narration,
            'custom_media': custom_media
        })
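    # Clips whose media or TTS generation fails are skipped rather than aborting
    # the whole render; if every clip fails, the temp folder is removed and the
    # function returns (None, None) so the UI shows no video.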
    # Generate clips
    clips = []
    for idx, clip_data in enumerate(clips_data):
        # Use custom media if provided, otherwise generate media
        if clip_data['custom_media']:
            media_path = clip_data['custom_media']
            asset_type = 'video' if media_path.lower().endswith(('.mp4', '.avi', '.mov')) else 'image'
        else:
            media_asset = generate_media(clip_data['visual_prompt'], current_index=idx, total_segments=num_clips)
            if not media_asset:
                continue
            media_path = media_asset['path']
            asset_type = media_asset['asset_type']
        # Video clip percentage: with this probability, retry generation in the
        # hope of getting a video asset instead of a still image
        if not clip_data['custom_media'] and asset_type != 'video' and random.random() < (video_clip_percent / 100):
            media_asset = generate_media(clip_data['visual_prompt'], current_index=idx, total_segments=num_clips)
            if media_asset and media_asset['asset_type'] == 'video':
                media_path = media_asset['path']
                asset_type = 'video'
        # Generate TTS
        tts_path = generate_tts(clip_data['narration'], 'en')
        if not tts_path:
            continue
        # Create clip (rough duration estimate: half a second per narration word, 3 s minimum)
        duration = max(3, len(clip_data['narration'].split()) * 0.5)
        clip = create_clip(
            media_path=media_path,
            asset_type=asset_type,
            tts_path=tts_path,
            duration=duration,
            effects='fade-in',
            narration_text=clip_data['narration'],
            segment_index=idx
        )
        if clip and zoom_pan_effect and asset_type == 'image':
            clip = apply_kenburns_effect(clip, TARGET_RESOLUTION)
        if clip:
            clips.append(clip)
    if not clips:
        shutil.rmtree(TEMP_FOLDER)
        return None, None
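    # method="compose" lets moviepy place clips of differing sizes on a common
    # canvas instead of requiring identical dimensions.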
    # Concatenate clips
    final_video = concatenate_videoclips(clips, method="compose")
    # Add background music if uploaded
    if bgm_upload:
        bg_music = AudioFileClip(bgm_upload).volumex(bgm_volume)
        if bg_music.duration < final_video.duration:
            bg_music = audio_loop(bg_music, duration=final_video.duration)
        else:
            bg_music = bg_music.subclip(0, final_video.duration)
        final_video = final_video.set_audio(CompositeAudioClip([final_video.audio, bg_music]))
    # Export video
    output_path = "final_video.mp4"
    final_video.write_videofile(output_path, codec='libx264', fps=24, preset=render_speed)
    # Clean up
    shutil.rmtree(TEMP_FOLDER)
    return output_path, output_path
# Gradio Blocks Interface
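# The interface is laid out in three columns: content input and script
# generation, a per-clip editor (up to MAX_CLIPS accordions), and video
# settings plus the rendered output.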
with gr.Blocks(title="Orbit Video Engine") as demo:
    gr.Markdown("# Orbit Video Engine")
    gr.Markdown("Create funny documentary-style videos with ease!")
    with gr.Row():
        # Column 1: Content Input & Script Generation
        with gr.Column(scale=1):
            gr.Markdown("### 1. Content Input")
            topic_input = gr.Textbox(label="Topic", placeholder="e.g., Funny Cat Facts")
            script_input = gr.Textbox(label="Or Paste Full Script", lines=10, placeholder="[Title]\nNarration...")
            generate_button = gr.Button("Generate Script & Load Clips")
            script_display = gr.Textbox(label="Generated Script", interactive=False, visible=False)
        # Column 2: Clip Editor
        with gr.Column(scale=2):
            gr.Markdown("### 2. Edit Clips")
            gr.Markdown("Modify prompts, narration, or upload custom media below.")
            with gr.Column() as clip_editor:
                clip_accordions = []
                for i in range(MAX_CLIPS):
                    with gr.Accordion(f"Clip {i+1}", visible=False) as acc:
                        visual_prompt = gr.Textbox(label="Visual Prompt")
                        narration = gr.Textbox(label="Narration", lines=3)
                        custom_media = gr.File(label="Upload Custom Media (Image/Video)", type="filepath")
                    clip_accordions.append((acc, visual_prompt, narration, custom_media))
        # Column 3: Settings & Output
        with gr.Column(scale=1):
            gr.Markdown("### 3. Video Settings")
            resolution = gr.Radio(["Short (1080x1920)", "Full HD (1920x1080)"], label="Resolution", value="Full HD (1920x1080)")
            render_speed = gr.Dropdown(["ultrafast", "faster", "fast", "medium", "slow", "slower", "veryslow"], label="Render Speed", value="fast")
            video_clip_percent = gr.Slider(0, 100, value=25, label="Video Clip Percentage")
            zoom_pan_effect = gr.Checkbox(label="Add Zoom/Pan Effect (Images)", value=True)
            with gr.Accordion("Background Music", open=False):
                bgm_upload = gr.Audio(label="Upload Background Music", type="filepath")
                bgm_volume = gr.Slider(0.0, 1.0, value=0.15, label="BGM Volume")
            with gr.Accordion("Subtitle Settings", open=True):
                subtitles_enabled = gr.Checkbox(label="Enable Subtitles", value=True)
                font = gr.Dropdown(["Impact", "Arial", "Times New Roman"], label="Font", value="Arial")
                font_size = gr.Number(label="Font Size", value=45)
                outline_width = gr.Number(label="Outline Width", value=2)
                font_color = gr.ColorPicker(label="Font Color", value="#FFFFFF")
                outline_color = gr.ColorPicker(label="Outline Color", value="#000000")
                position = gr.Radio(["center", "bottom", "top"], label="Position", value="bottom")
            generate_video_button = gr.Button("Generate Video")
            gr.Markdown("### 4. Output")
            output_video = gr.Video(label="Generated Video")
            download_button = gr.File(label="Download Video")
    # State to track number of clips
    num_clips_state = gr.State(value=0)
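    # Outputs for the script handler are listed group-by-group (all accordions,
    # then all prompt boxes, narration boxes and file uploads) to match the flat
    # tuple returned by process_script.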
    # Event handlers
    generate_button.click(
        fn=process_script,
        inputs=[topic_input, script_input],
        outputs=[script_display, num_clips_state] +
                [acc for acc, _, _, _ in clip_accordions] +
                [prompt for _, prompt, _, _ in clip_accordions] +
                [narr for _, _, narr, _ in clip_accordions] +
                [media for _, _, _, media in clip_accordions]
    ).then(
        fn=lambda s: gr.update(visible=True),
        inputs=[script_display],
        outputs=[script_display]
    )
    generate_video_button.click(
        fn=generate_video_full,
        inputs=[resolution, render_speed, video_clip_percent, zoom_pan_effect,
                bgm_upload, bgm_volume, subtitles_enabled, font, font_size,
                outline_width, font_color, outline_color, position, num_clips_state] +
               [comp for acc in clip_accordions for comp in acc[1:]],  # visual_prompt, narration, custom_media
        outputs=[output_video, download_button]
    )
# Launch the interface
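# share=True also creates a temporary public Gradio link; set it to False to
# serve the app locally only.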
demo.launch(share=True)