Update app.py

app.py (CHANGED):
@@ -1,88 +1,49 @@
-
-
 # Import necessary libraries
 from kokoro import KPipeline
-
-import soundfile as sf
-import torch
-
 import soundfile as sf
 import os
-from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip
-from PIL import Image
-import tempfile
-import random
-import cv2
-import math
-import os, requests, io, time, re, random
 from moviepy.editor import (
-    VideoFileClip, concatenate_videoclips, AudioFileClip,
-    CompositeVideoClip, TextClip, CompositeAudioClip
 )
-import
 import shutil
-import os
-import moviepy.video.fx.all as vfx
 import moviepy.config as mpy_config
 from pydub import AudioSegment
-from pydub.generators import Sine
-
-from PIL import Image, ImageDraw, ImageFont
-import numpy as np
-from bs4 import BeautifulSoup
-import base64
-from urllib.parse import quote
-import pysrt
 from gtts import gTTS
-import gradio as gr
 
 # Initialize Kokoro TTS pipeline (using American English)
 pipeline = KPipeline(lang_code='a')  # Use voice 'af_heart' for American English
 # Ensure ImageMagick binary is set
 mpy_config.change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})
 
-#
-PEXELS_API_KEY = 'BhJqbcdm9Vi90KqzXKAhnEHGsuFNv4irXuOjWtT761U49lRzo03qBGna'
 OPENROUTER_API_KEY = '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'
 OPENROUTER_MODEL = "google/gemini-2.0-flash-exp:free"
 OUTPUT_VIDEO_FILENAME = "final_video.mp4"
 USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
-
-
-
-# Additional global variables needed for the Gradio interface
 selected_voice = 'af_heart'  # Default voice
 voice_speed = 0.9  # Default voice speed
 font_size = 45  # Default font size
-video_clip_probability = 0.25  # Default probability for video clips
 bg_music_volume = 0.08  # Default background music volume
 fps = 30  # Default FPS
 preset = "veryfast"  # Default preset
-TARGET_RESOLUTION = None
-CAPTION_COLOR = None
-TEMP_FOLDER = None
-
-
-# ---------------- Helper Functions ---------------- #
-# (Your existing helper functions remain unchanged: generate_script, parse_script,
-# search_pexels_videos, search_pexels_images, search_google_images, download_image,
-# download_video, generate_media, generate_tts, apply_kenburns_effect,
-# resize_to_fill, find_mp3_files, add_background_music, create_clip,
-# fix_imagemagick_policy)
-
-# Define these globally as they were in your original code but will be set per run
-TARGET_RESOLUTION = None
-CAPTION_COLOR = None
-TEMP_FOLDER = None
 
 def generate_script(user_input):
-    """Generate documentary script
     headers = {
         'Authorization': f'Bearer {OPENROUTER_API_KEY}',
         'HTTP-Referer': 'https://your-domain.com',
         'X-Title': 'AI Documentary Maker'
     }
-
     prompt = f"""You're a professional documentary narrator. Your job is to write a serious, natural, and informative video script based on one topic.
 
 The script should sound like a real human voiceover from a TV show or documentary — clear, factual, and engaging, like something you'd hear on National Geographic or a news report.
@@ -125,18 +86,14 @@ Rising temperatures are causing coral bleaching and habitat loss.
 
 Follow to explore more about the changing planet we live on.
 
-
-
-Now here is the Topic/scrip: {user_input}
 """
-
     data = {
         'model': OPENROUTER_MODEL,
         'messages': [{'role': 'user', 'content': prompt}],
         'temperature': 0.4,
         'max_tokens': 5000
     }
-
     try:
         response = requests.post(
             'https://openrouter.ai/api/v1/chat/completions',
@@ -144,343 +101,55 @@ Now here is the Topic/scrip: {user_input}
             json=data,
             timeout=30
         )
-
         if response.status_code == 200:
-
-            if 'choices' in response_data and len(response_data['choices']) > 0:
-                return response_data['choices'][0]['message']['content']
-            else:
-                print("Unexpected response format:", response_data)
-                return None
         else:
             print(f"API Error {response.status_code}: {response.text}")
             return None
-
     except Exception as e:
         print(f"Request failed: {str(e)}")
         return None
 
 def parse_script(script_text):
-    """
-    Parse the generated script into a list of elements.
-    For each section, create two elements:
-    - A 'media' element using the section title as the visual prompt.
-    - A 'tts' element with the narration text, voice info, and computed duration.
-    """
     sections = {}
     current_title = None
     current_text = ""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            media_element = {"type": "media", "prompt": title, "effects": "fade-in"}
-            words = narration.split()
-            duration = max(3, len(words) * 0.5)
-            tts_element = {"type": "tts", "text": narration, "voice": "en", "duration": duration}
-            elements.append(media_element)
-            elements.append(tts_element)
-
-        return elements
-    except Exception as e:
-        print(f"Error parsing script: {e}")
-        return []
-
-def search_pexels_videos(query, pexels_api_key):
-    """Search for a video on Pexels by query and return a random HD video."""
-    headers = {'Authorization': pexels_api_key}
-    base_url = "https://api.pexels.com/videos/search"
-    num_pages = 3
-    videos_per_page = 15
-
-    max_retries = 3
-    retry_delay = 1
-
-    search_query = query
-    all_videos = []
-
-    for page in range(1, num_pages + 1):
-        for attempt in range(max_retries):
-            try:
-                params = {"query": search_query, "per_page": videos_per_page, "page": page}
-                response = requests.get(base_url, headers=headers, params=params, timeout=10)
-
-                if response.status_code == 200:
-                    data = response.json()
-                    videos = data.get("videos", [])
-
-                    if not videos:
-                        print(f"No videos found on page {page}.")
-                        break
-
-                    for video in videos:
-                        video_files = video.get("video_files", [])
-                        for file in video_files:
-                            if file.get("quality") == "hd":
-                                all_videos.append(file.get("link"))
-                                break
-
-                    break
-
-                elif response.status_code == 429:
-                    print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
-                    time.sleep(retry_delay)
-                    retry_delay *= 2
-                else:
-                    print(f"Error fetching videos: {response.status_code} {response.text}")
-                    if attempt < max_retries - 1:
-                        print(f"Retrying in {retry_delay} seconds...")
-                        time.sleep(retry_delay)
-                        retry_delay *= 2
-                    else:
-                        break
-
-            except requests.exceptions.RequestException as e:
-                print(f"Request exception: {e}")
-                if attempt < max_retries - 1:
-                    print(f"Retrying in {retry_delay} seconds...")
-                    time.sleep(retry_delay)
-                    retry_delay *= 2
-                else:
-                    break
-
-    if all_videos:
-        random_video = random.choice(all_videos)
-        print(f"Selected random video from {len(all_videos)} HD videos")
-        return random_video
-    else:
-        print("No suitable videos found after searching all pages.")
-        return None
-
-def search_pexels_images(query, pexels_api_key):
-    """Search for an image on Pexels by query."""
-    headers = {'Authorization': pexels_api_key}
-    url = "https://api.pexels.com/v1/search"
-    params = {"query": query, "per_page": 5, "orientation": "landscape"}
-
-    max_retries = 3
-    retry_delay = 1
-
-    for attempt in range(max_retries):
-        try:
-            response = requests.get(url, headers=headers, params=params, timeout=10)
-
-            if response.status_code == 200:
-                data = response.json()
-                photos = data.get("photos", [])
-                if photos:
-                    photo = random.choice(photos[:min(5, len(photos))])
-                    img_url = photo.get("src", {}).get("original")
-                    return img_url
-                else:
-                    print(f"No images found for query: {query}")
-                    return None
-
-            elif response.status_code == 429:
-                print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
-                time.sleep(retry_delay)
-                retry_delay *= 2
-            else:
-                print(f"Error fetching images: {response.status_code} {response.text}")
-                if attempt < max_retries - 1:
-                    print(f"Retrying in {retry_delay} seconds...")
-                    time.sleep(retry_delay)
-                    retry_delay *= 2
-
-        except requests.exceptions.RequestException as e:
-            print(f"Request exception: {e}")
-            if attempt < max_retries - 1:
-                print(f"Retrying in {retry_delay} seconds...")
-                time.sleep(retry_delay)
-                retry_delay *= 2
-
-    print(f"No Pexels images found for query: {query} after all attempts")
-    return None
-
-def search_google_images(query):
-    """Search for images on Google Images (for news-related queries)"""
-    try:
-        search_url = f"https://www.google.com/search?q={quote(query)}&tbm=isch"
-        headers = {"User-Agent": USER_AGENT}
-        response = requests.get(search_url, headers=headers, timeout=10)
-        soup = BeautifulSoup(response.text, "html.parser")
-
-        img_tags = soup.find_all("img")
-        image_urls = []
-        for img in img_tags:
-            src = img.get("src", "")
-            if src.startswith("http") and "gstatic" not in src:
-                image_urls.append(src)
-
-        if image_urls:
-            return random.choice(image_urls[:5]) if len(image_urls) >= 5 else image_urls[0]
-        else:
-            print(f"No Google Images found for query: {query}")
-            return None
-    except Exception as e:
-        print(f"Error in Google Images search: {e}")
-        return None
-
-def download_image(image_url, filename):
-    """Download an image from a URL to a local file with enhanced error handling."""
-    try:
-        headers = {"User-Agent": USER_AGENT}
-        print(f"Downloading image from: {image_url} to {filename}")
-        response = requests.get(image_url, headers=headers, stream=True, timeout=15)
-        response.raise_for_status()
-
-        with open(filename, 'wb') as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                f.write(chunk)
-
-        print(f"Image downloaded successfully to: {filename}")
-
-        try:
-            img = Image.open(filename)
-            img.verify()
-            img = Image.open(filename)
-            if img.mode != 'RGB':
-                img = img.convert('RGB')
-                img.save(filename)
-            print(f"Image validated and processed: {filename}")
-            return filename
-        except Exception as e_validate:
-            print(f"Downloaded file is not a valid image: {e_validate}")
-            if os.path.exists(filename):
-                os.remove(filename)
-            return None
-
-    except requests.exceptions.RequestException as e_download:
-        print(f"Image download error: {e_download}")
-        if os.path.exists(filename):
-            os.remove(filename)
-        return None
-    except Exception as e_general:
-        print(f"General error during image processing: {e_general}")
-        if os.path.exists(filename):
-            os.remove(filename)
-        return None
-
-def download_video(video_url, filename):
-    """Download a video from a URL to a local file."""
-    try:
-        response = requests.get(video_url, stream=True, timeout=30)
-        response.raise_for_status()
-        with open(filename, 'wb') as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                f.write(chunk)
-        print(f"Video downloaded successfully to: {filename}")
-        return filename
-    except Exception as e:
-        print(f"Video download error: {e}")
-        if os.path.exists(filename):
-            os.remove(filename)
-        return None
-
-def generate_media(prompt, user_image=None, current_index=0, total_segments=1):
-    """
-    Generate a visual asset by first searching for a video or using a specific search strategy.
-    For news-related queries, use Google Images.
-    Returns a dict: {'path': <file_path>, 'asset_type': 'video' or 'image'}.
-    """
-    safe_prompt = re.sub(r'[^\w\s-]', '', prompt).strip().replace(' ', '_')
-
-    if "news" in prompt.lower():
-        print(f"News-related query detected: {prompt}. Using Google Images...")
-        image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_news.jpg")
-        image_url = search_google_images(prompt)
-        if image_url:
-            downloaded_image = download_image(image_url, image_file)
-            if downloaded_image:
-                print(f"News image saved to {downloaded_image}")
-                return {"path": downloaded_image, "asset_type": "image"}
-        else:
-            print(f"Google Images search failed for prompt: {prompt}")
-
-    if random.random() < video_clip_probability:
-        video_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_video.mp4")
-        video_url = search_pexels_videos(prompt, PEXELS_API_KEY)
-        if video_url:
-            downloaded_video = download_video(video_url, video_file)
-            if downloaded_video:
-                print(f"Video asset saved to {downloaded_video}")
-                return {"path": downloaded_video, "asset_type": "video"}
-        else:
-            print(f"Pexels video search failed for prompt: {prompt}")
-
-    image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}.jpg")
-    image_url = search_pexels_images(prompt, PEXELS_API_KEY)
-    if image_url:
-        downloaded_image = download_image(image_url, image_file)
-        if downloaded_image:
-            print(f"Image asset saved to {downloaded_image}")
-            return {"path": downloaded_image, "asset_type": "image"}
-    else:
-        print(f"Pexels image download failed for prompt: {prompt}")
-
-    fallback_terms = ["nature", "people", "landscape", "technology", "business"]
-    for term in fallback_terms:
-        print(f"Trying fallback image search with term: {term}")
-        fallback_file = os.path.join(TEMP_FOLDER, f"fallback_{term}.jpg")
-        fallback_url = search_pexels_images(term, PEXELS_API_KEY)
-        if fallback_url:
-            downloaded_fallback = download_image(fallback_url, fallback_file)
-            if downloaded_fallback:
-                print(f"Fallback image saved to {downloaded_fallback}")
-                return {"path": downloaded_fallback, "asset_type": "image"}
-            else:
-                print(f"Fallback image download failed for term: {term}")
-        else:
-            print(f"Fallback image search failed for term: {term}")
-
-    print(f"Failed to generate visual asset for prompt: {prompt}")
-    return None
-
-def generate_silent_audio(duration, sample_rate=24000):
-    """Generate a silent WAV audio file lasting 'duration' seconds."""
-    num_samples = int(duration * sample_rate)
-    silence = np.zeros(num_samples, dtype=np.float32)
-    silent_path = os.path.join(TEMP_FOLDER, f"silent_{int(time.time())}.wav")
-    sf.write(silent_path, silence, sample_rate)
-    print(f"Silent audio generated: {silent_path}")
-    return silent_path
 
 def generate_tts(text, voice):
-    """
-    Generate TTS audio using Kokoro, falling back to gTTS or silent audio if needed.
-    """
     safe_text = re.sub(r'[^\w\s-]', '', text[:10]).strip().replace(' ', '_')
     file_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text}.wav")
-
     if os.path.exists(file_path):
         print(f"Using cached TTS for text '{text[:10]}...'")
         return file_path
-
     try:
         kokoro_voice = selected_voice if voice == 'en' else voice
         generator = pipeline(text, voice=kokoro_voice, speed=voice_speed, split_pattern=r'\n+')
-        audio_segments = []
-        for i, (gs, ps, audio) in enumerate(generator):
-            audio_segments.append(audio)
         full_audio = np.concatenate(audio_segments) if len(audio_segments) > 1 else audio_segments[0]
         sf.write(file_path, full_audio, 24000)
         print(f"TTS audio saved to {file_path} (Kokoro)")
@@ -499,89 +168,13 @@ def generate_tts(text, voice):
         return file_path
     except Exception as fallback_error:
         print(f"Both TTS methods failed: {fallback_error}")
-        return
-
-def apply_kenburns_effect(clip, target_resolution, effect_type=None):
-    """Apply a smooth Ken Burns effect with a single movement pattern."""
-    target_w, target_h = target_resolution
-    clip_aspect = clip.w / clip.h
-    target_aspect = target_w / target_h
-
-    if clip_aspect > target_aspect:
-        new_height = target_h
-        new_width = int(new_height * clip_aspect)
-    else:
-        new_width = target_w
-        new_height = int(new_width / clip_aspect)
-
-    clip = clip.resize(newsize=(new_width, new_height))
-    base_scale = 1.15
-    new_width = int(new_width * base_scale)
-    new_height = int(new_height * base_scale)
-    clip = clip.resize(newsize=(new_width, new_height))
-
-    max_offset_x = new_width - target_w
-    max_offset_y = new_height - target_h
-
-    available_effects = ["zoom-in", "zoom-out", "pan-left", "pan-right", "up-left"]
-    if effect_type is None or effect_type == "random":
-        effect_type = random.choice(available_effects)
-
-    if effect_type == "zoom-in":
-        start_zoom = 0.9
-        end_zoom = 1.1
-        start_center = (new_width / 2, new_height / 2)
-        end_center = start_center
-    elif effect_type == "zoom-out":
-        start_zoom = 1.1
-        end_zoom = 0.9
-        start_center = (new_width / 2, new_height / 2)
-        end_center = start_center
-    elif effect_type == "pan-left":
-        start_zoom = 1.0
-        end_zoom = 1.0
-        start_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
-        end_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
-    elif effect_type == "pan-right":
-        start_zoom = 1.0
-        end_zoom = 1.0
-        start_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
-        end_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
-    elif effect_type == "up-left":
-        start_zoom = 1.0
-        end_zoom = 1.0
-        start_center = (max_offset_x + target_w / 2, max_offset_y + target_h / 2)
-        end_center = (target_w / 2, target_h / 2)
-    else:
-        raise ValueError(f"Unsupported effect_type: {effect_type}")
-
-    def transform_frame(get_frame, t):
-        frame = get_frame(t)
-        ratio = t / clip.duration if clip.duration > 0 else 0
-        ratio = 0.5 - 0.5 * math.cos(math.pi * ratio)
-        current_zoom = start_zoom + (end_zoom - start_zoom) * ratio
-        crop_w = int(target_w / current_zoom)
-        crop_h = int(target_h / current_zoom)
-        current_center_x = start_center[0] + (end_center[0] - start_center[0]) * ratio
-        current_center_y = start_center[1] + (end_center[1] - start_center[1]) * ratio
-        min_center_x = crop_w / 2
-        max_center_x = new_width - crop_w / 2
-        min_center_y = crop_h / 2
-        max_center_y = new_height - crop_h / 2
-        current_center_x = max(min_center_x, min(current_center_x, max_center_x))
-        current_center_y = max(min_center_y, min(current_center_y, max_center_y))
-        cropped_frame = cv2.getRectSubPix(frame, (crop_w, crop_h), (current_center_x, current_center_y))
-        resized_frame = cv2.resize(cropped_frame, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
-        return resized_frame
-
-    return clip.fl(transform_frame)
 
 def resize_to_fill(clip, target_resolution):
-    """Resize and crop
     target_w, target_h = target_resolution
     clip_aspect = clip.w / clip.h
     target_aspect = target_w / target_h
-
     if clip_aspect > target_aspect:
         clip = clip.resize(height=target_h)
         crop_amount = (clip.w - target_w) / 2
@@ -590,25 +183,13 @@ def resize_to_fill(clip, target_resolution):
         clip = clip.resize(width=target_w)
         crop_amount = (clip.h - target_h) / 2
         clip = clip.crop(x1=0, x2=clip.w, y1=crop_amount, y2=clip.h - crop_amount)
-
     return clip
 
-def
-    """
-    mp3_files = []
-    for root, dirs, files in os.walk('.'):
-        for file in files:
-            if file.endswith('.mp3'):
-                mp3_path = os.path.join(root, file)
-                mp3_files.append(mp3_path)
-                print(f"Found MP3 file: {mp3_path}")
-    return mp3_files[0] if mp3_files else None
-
-def add_background_music(final_video, bg_music_volume=0.10):
-    """Add background music to the final video using any MP3 file found."""
     try:
         bg_music_path = "music.mp3"
-        if
         print(f"Adding background music from: {bg_music_path}")
         bg_music = AudioFileClip(bg_music_path)
         if bg_music.duration < final_video.duration:
@@ -622,101 +203,51 @@ def add_background_music(final_video, bg_music_volume=0.10):
             final_video = final_video.set_audio(mixed_audio)
             print("Background music added successfully")
         else:
-            print("No
         return final_video
     except Exception as e:
         print(f"Error adding background music: {e}")
-        print("Continuing without background music")
         return final_video
 
-def create_clip(
-    """Create a video clip with synchronized
     try:
-        print(f"Creating clip #{segment_index}
-
-
-            return None
-
         audio_clip = AudioFileClip(tts_path).audio_fadeout(0.2)
-
-
-
-
-
-
-
-
-
-
-
-            img = Image.open(media_path)
-            if img.mode != 'RGB':
-                with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp:
-                    img.convert('RGB').save(temp.name)
-                    media_path = temp.name
-                img.close()
-            clip = ImageClip(media_path).set_duration(target_duration)
-            clip = apply_kenburns_effect(clip, TARGET_RESOLUTION)
-            clip = clip.fadein(0.3).fadeout(0.3)
-        else:
-            return None
-
-        if narration_text and CAPTION_COLOR != "transparent":
-            try:
-                words = narration_text.split()
-                chunks = []
-                current_chunk = []
-                for word in words:
-                    current_chunk.append(word)
-                    if len(current_chunk) >= 5:
-                        chunks.append(' '.join(current_chunk))
-                        current_chunk = []
-                if current_chunk:
-                    chunks.append(' '.join(current_chunk))
-
-                chunk_duration = audio_duration / len(chunks)
-                subtitle_clips = []
-                subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)
-
-                for i, chunk_text in enumerate(chunks):
-                    start_time = i * chunk_duration
-                    end_time = (i + 1) * chunk_duration
-                    txt_clip = TextClip(
-                        chunk_text,
-                        fontsize=45,
-                        font='Arial-Bold',
-                        color=CAPTION_COLOR,
-                        bg_color='rgba(0, 0, 0, 0.25)',
-                        method='caption',
-                        align='center',
-                        stroke_width=2,
-                        stroke_color=CAPTION_COLOR,
-                        size=(TARGET_RESOLUTION[0] * 0.8, None)
-                    ).set_start(start_time).set_end(end_time)
-                    txt_clip = txt_clip.set_position(('center', subtitle_y_position))
-                    subtitle_clips.append(txt_clip)
-
-                clip = CompositeVideoClip([clip] + subtitle_clips)
-            except Exception as sub_error:
-                print(f"Subtitle error: {sub_error}")
                 txt_clip = TextClip(
-
                     fontsize=font_size,
                     color=CAPTION_COLOR,
                     align='center',
-                    size=(TARGET_RESOLUTION[0] * 0.
-                ).set_position(('center', int(TARGET_RESOLUTION[1]
-
-
-        print(f"Clip created: {
-        return
     except Exception as e:
         print(f"Error in create_clip: {str(e)}")
         return None
 
 def fix_imagemagick_policy():
-    """Fix ImageMagick security policies."""
     try:
         print("Attempting to fix ImageMagick security policies...")
         policy_paths = [
@@ -727,69 +258,29 @@ def fix_imagemagick_policy():
         ]
         found_policy = next((path for path in policy_paths if os.path.exists(path)), None)
         if not found_policy:
-            print("No policy.xml found.
             return False
         print(f"Modifying policy file at {found_policy}")
        os.system(f"sudo cp {found_policy} {found_policy}.bak")
         os.system(f"sudo sed -i 's/rights=\"none\"/rights=\"read|write\"/g' {found_policy}")
         os.system(f"sudo sed -i 's/<policy domain=\"path\" pattern=\"@\*\"[^>]*>/<policy domain=\"path\" pattern=\"@*\" rights=\"read|write\"/g' {found_policy}")
-        os.system(f"sudo sed -i 's/<policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"[^>]*>/<!-- <policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"> -->/g' {found_policy}")
         print("ImageMagick policies updated successfully.")
         return True
     except Exception as e:
         print(f"Error fixing policies: {e}")
         return False
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# ---------------- Main Video Generation Function ---------------- #
 def generate_video(user_input, resolution, caption_option):
-    """Generate a video
     global TARGET_RESOLUTION, CAPTION_COLOR, TEMP_FOLDER
-
-    # Set resolution
-    if resolution == "Full":
-        TARGET_RESOLUTION = (1920, 1080)
-    elif resolution == "Short":
-        TARGET_RESOLUTION = (1080, 1920)
-    else:
-        TARGET_RESOLUTION = (1920, 1080)  # Default
-
-    # Set caption color
     CAPTION_COLOR = "white" if caption_option == "Yes" else "transparent"
-
-    # Create a unique temporary folder
     TEMP_FOLDER = tempfile.mkdtemp()
-
-    # Fix ImageMagick policy
     fix_success = fix_imagemagick_policy()
     if not fix_success:
-        print("
-
     print("Generating script from API...")
     script = generate_script(user_input)
     if not script:
@@ -797,40 +288,48 @@ def generate_video(user_input, resolution, caption_option):
         shutil.rmtree(TEMP_FOLDER)
         return None
     print("Generated Script:\n", script)
     elements = parse_script(script)
     if not elements:
         print("Failed to parse script into elements.")
         shutil.rmtree(TEMP_FOLDER)
         return None
-    print(f"Parsed {len(elements)
-
-
-
-        paired_elements.append((elements[i], elements[i + 1]))
-
-    if not paired_elements:
-        print("No valid script segments found.")
         shutil.rmtree(TEMP_FOLDER)
         return None
-
     clips = []
-    for idx,
-        print(f"\nProcessing segment {idx+1}/{len(
-        media_asset = generate_media(media_elem['prompt'], current_index=idx, total_segments=len(paired_elements))
-        if not media_asset:
-            print(f"Skipping segment {idx+1} due to missing media asset.")
-            continue
         tts_path = generate_tts(tts_elem['text'], tts_elem['voice'])
         if not tts_path:
-            print(f"Skipping segment {idx+1} due to TTS
             continue
         clip = create_clip(
-
-
             tts_path=tts_path,
-            duration=tts_elem['duration'],
-            effects=media_elem.get('effects', 'fade-in'),
             narration_text=tts_elem['text'],
             segment_index=idx
         )
@@ -838,28 +337,27 @@ def generate_video(user_input, resolution, caption_option):
             clips.append(clip)
         else:
             print(f"Clip creation failed for segment {idx+1}.")
-
     if not clips:
         print("No clips were successfully created.")
         shutil.rmtree(TEMP_FOLDER)
         return None
-
     print("\nConcatenating clips...")
     final_video = concatenate_videoclips(clips, method="compose")
     final_video = add_background_music(final_video, bg_music_volume=bg_music_volume)
-
     print(f"Exporting final video to {OUTPUT_VIDEO_FILENAME}...")
     final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', fps=fps, preset=preset)
     print(f"Final video saved as {OUTPUT_VIDEO_FILENAME}")
-
-    # Clean up
     print("Cleaning up temporary files...")
     shutil.rmtree(TEMP_FOLDER)
     print("Temporary files removed.")
-
     return OUTPUT_VIDEO_FILENAME
 
-#
 VOICE_CHOICES = {
     'Emma (Female)': 'af_heart',
     'Bella (Female)': 'af_bella',
@@ -892,48 +390,40 @@ VOICE_CHOICES = {
 }
 
 def generate_video_with_options(user_input, resolution, caption_option, music_file, voice, vclip_prob, bg_vol, video_fps, video_preset, v_speed, caption_size):
-
-
-    # Update global variables with user selections
     selected_voice = VOICE_CHOICES[voice]
     voice_speed = v_speed
     font_size = caption_size
-    video_clip_probability = vclip_prob / 100  # Convert from percentage to decimal
     bg_music_volume = bg_vol
     fps = video_fps
     preset = video_preset
-
-    # Handle music upload
     if music_file is not None:
         target_path = "music.mp3"
         shutil.copy(music_file.name, target_path)
         print(f"Uploaded music saved as: {target_path}")
-
-    # Generate the video
     return generate_video(user_input, resolution, caption_option)
 
-# Create the Gradio interface
 iface = gr.Interface(
     fn=generate_video_with_options,
     inputs=[
         gr.Textbox(label="Video Concept", placeholder="Enter your video concept here..."),
-        gr.Radio(["Full", "Short"], label="Resolution", value="
-        gr.Radio(["No"], label="Captions
         gr.File(label="Upload Background Music (MP3)", file_types=[".mp3"]),
         gr.Dropdown(choices=list(VOICE_CHOICES.keys()), label="Choose Voice", value="Emma (Female)"),
-        gr.Slider(0, 100, value=25, step=1, label="Video Clip Usage Probability (%)"),
         gr.Slider(0.0, 1.0, value=0.08, step=0.01, label="Background Music Volume"),
         gr.Slider(10, 60, value=30, step=1, label="Video FPS"),
         gr.Dropdown(choices=["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow"],
                     value="veryfast", label="Export Preset"),
-        gr.Slider(0.5, 1.5, value=
         gr.Slider(20, 100, value=45, step=1, label="Caption Font Size")
     ],
     outputs=gr.Video(label="Generated Video"),
     title="AI Documentary Video Generator",
-    description="Create short documentary videos
 )
 
-# Launch the interface
 if __name__ == "__main__":
     iface.launch(share=True)

app.py — updated file (additions marked "+", unchanged regions elided as "..."):
 # Import necessary libraries
 from kokoro import KPipeline
 import soundfile as sf
 import os
 from moviepy.editor import (
+    VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeVideoClip, TextClip, CompositeAudioClip
 )
+import tempfile
+import random
 import shutil
 import moviepy.config as mpy_config
 from pydub import AudioSegment
 from gtts import gTTS
+import gradio as gr
+import requests
+import re
+import numpy as np  # editor's fix: generate_tts below calls np.concatenate, but this import was missing from the commit
 
 # Initialize Kokoro TTS pipeline (using American English)
 pipeline = KPipeline(lang_code='a')  # Use voice 'af_heart' for American English
+
 # Ensure ImageMagick binary is set
 mpy_config.change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})
 
+# Global Configuration
 OPENROUTER_API_KEY = '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'
 OPENROUTER_MODEL = "google/gemini-2.0-flash-exp:free"
 OUTPUT_VIDEO_FILENAME = "final_video.mp4"
 USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+TARGET_RESOLUTION = (1080, 1920)  # Fixed vertical resolution
+CAPTION_COLOR = None
+TEMP_FOLDER = None
 selected_voice = 'af_heart'  # Default voice
 voice_speed = 0.9  # Default voice speed
 font_size = 45  # Default font size
 bg_music_volume = 0.08  # Default background music volume
 fps = 30  # Default FPS
 preset = "veryfast"  # Default preset
 
+# Helper Functions
 def generate_script(user_input):
+    """Generate a documentary script based on user input."""
     headers = {
         'Authorization': f'Bearer {OPENROUTER_API_KEY}',
         'HTTP-Referer': 'https://your-domain.com',
         'X-Title': 'AI Documentary Maker'
     }
     prompt = f"""You're a professional documentary narrator. Your job is to write a serious, natural, and informative video script based on one topic.
 
 The script should sound like a real human voiceover from a TV show or documentary — clear, factual, and engaging, like something you'd hear on National Geographic or a news report.
 ...
 
 Follow to explore more about the changing planet we live on.
 
+Topic: {user_input}
 """
     data = {
         'model': OPENROUTER_MODEL,
         'messages': [{'role': 'user', 'content': prompt}],
         'temperature': 0.4,
         'max_tokens': 5000
     }
     try:
         response = requests.post(
             'https://openrouter.ai/api/v1/chat/completions',
 ...
             json=data,
             timeout=30
         )
         if response.status_code == 200:
+            return response.json()['choices'][0]['message']['content']
         else:
             print(f"API Error {response.status_code}: {response.text}")
             return None
     except Exception as e:
         print(f"Request failed: {str(e)}")
         return None
 
 def parse_script(script_text):
+    """Parse the script into narration elements."""
     sections = {}
     current_title = None
     current_text = ""
+    for line in script_text.splitlines():
+        line = line.strip()
+        if line.startswith("[") and "]" in line:
+            bracket_start = line.find("[")
+            bracket_end = line.find("]", bracket_start)
+            if bracket_start != -1 and bracket_end != -1:
+                if current_title is not None:
+                    sections[current_title] = current_text.strip()
+                current_title = line[bracket_start+1:bracket_end]
+                current_text = line[bracket_end+1:].strip()
+        elif current_title:
+            current_text += line + " "
+    if current_title:
+        sections[current_title] = current_text.strip()
+    elements = []
+    for title, narration in sections.items():
+        if not narration:
+            continue
+        words = narration.split()
+        duration = max(3, len(words) * 0.5)  # Initial estimate, actual duration from TTS
+        tts_element = {"type": "tts", "text": narration, "voice": "en", "duration": duration}
+        elements.append(tts_element)
+    return elements
 
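A quick sanity check of the new parser (editor's sketch, not part of the commit; the sample script is hypothetical and follows the [Title] format the prompt requests):

# Editor's sketch: exercising parse_script on a hypothetical two-section script.
sample = (
    "[Coral Reefs]\n"
    "Rising temperatures are causing coral bleaching.\n"
    "[Ocean Currents]\n"
    "Currents redistribute heat around the planet.\n"
)
for element in parse_script(sample):
    print(element["text"], "->", element["duration"], "seconds")
# Expected output (6 words per section, so max(3, 6 * 0.5) = 3):
# Rising temperatures are causing coral bleaching. -> 3 seconds
# Currents redistribute heat around the planet. -> 3 seconds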
 def generate_tts(text, voice):
+    """Generate TTS audio using Kokoro or gTTS as fallback."""
     safe_text = re.sub(r'[^\w\s-]', '', text[:10]).strip().replace(' ', '_')
     file_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text}.wav")
     if os.path.exists(file_path):
         print(f"Using cached TTS for text '{text[:10]}...'")
         return file_path
     try:
         kokoro_voice = selected_voice if voice == 'en' else voice
         generator = pipeline(text, voice=kokoro_voice, speed=voice_speed, split_pattern=r'\n+')
+        audio_segments = [audio for _, _, audio in generator]
         full_audio = np.concatenate(audio_segments) if len(audio_segments) > 1 else audio_segments[0]
         sf.write(file_path, full_audio, 24000)
         print(f"TTS audio saved to {file_path} (Kokoro)")
 ...
         return file_path
     except Exception as fallback_error:
         print(f"Both TTS methods failed: {fallback_error}")
+        return None
 
 def resize_to_fill(clip, target_resolution):
+    """Resize and crop clip to fill the target resolution."""
     target_w, target_h = target_resolution
     clip_aspect = clip.w / clip.h
     target_aspect = target_w / target_h
     if clip_aspect > target_aspect:
         clip = clip.resize(height=target_h)
         crop_amount = (clip.w - target_w) / 2
 ...
         clip = clip.resize(width=target_w)
         crop_amount = (clip.h - target_h) / 2
         clip = clip.crop(x1=0, x2=clip.w, y1=crop_amount, y2=clip.h - crop_amount)
     return clip
 
+def add_background_music(final_video, bg_music_volume=0.08):
+    """Add background music to the final video."""
     try:
         bg_music_path = "music.mp3"
+        if os.path.exists(bg_music_path):
             print(f"Adding background music from: {bg_music_path}")
             bg_music = AudioFileClip(bg_music_path)
             if bg_music.duration < final_video.duration:
 ...
             final_video = final_video.set_audio(mixed_audio)
             print("Background music added successfully")
         else:
+            print("No music.mp3 found, skipping background music")
         return final_video
     except Exception as e:
         print(f"Error adding background music: {e}")
         return final_video
 
+def create_clip(video_path, start_time, duration, tts_path, narration_text, segment_index):
+    """Create a video clip with synchronized captions."""
     try:
+        print(f"Creating clip #{segment_index} from {start_time:.2f} to {start_time + duration:.2f}")
+        video_clip = VideoFileClip(video_path).subclip(start_time, start_time + duration)
+        video_clip = resize_to_fill(video_clip, TARGET_RESOLUTION)
         audio_clip = AudioFileClip(tts_path).audio_fadeout(0.2)
+        video_clip = video_clip.set_audio(audio_clip)
+
+        if CAPTION_COLOR != "transparent" and narration_text:
+            words = narration_text.split()
+            chunks = [words[i:i+5] for i in range(0, len(words), 5)]
+            chunk_duration = duration / len(chunks) if len(chunks) > 0 else duration
+            subtitle_clips = []
+            for i, chunk in enumerate(chunks):
+                chunk_text = ' '.join(chunk)
+                start_time = i * chunk_duration
+                end_time = (i + 1) * chunk_duration if i < len(chunks) - 1 else duration
                 txt_clip = TextClip(
+                    chunk_text,
                     fontsize=font_size,
+                    font='Arial-Bold',
                     color=CAPTION_COLOR,
+                    bg_color='rgba(0, 0, 0, 0.25)',
+                    method='caption',
                     align='center',
+                    size=(TARGET_RESOLUTION[0] * 0.8, None)
+                ).set_start(start_time).set_end(end_time).set_position(('center', int(TARGET_RESOLUTION[1] * 0.85)))
+                subtitle_clips.append(txt_clip)
+            video_clip = CompositeVideoClip([video_clip] + subtitle_clips)
+
+        print(f"Clip created: {video_clip.duration:.1f}s")
+        return video_clip
     except Exception as e:
         print(f"Error in create_clip: {str(e)}")
         return None
 
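To illustrate the caption timing scheme above — five-word chunks spread evenly over the narration, with the last chunk absorbing the remainder — here is a standalone editor's sketch (the sentence and duration are made up):

# Editor's sketch: mirrors the chunking/timing logic in create_clip.
words = "Rising temperatures are causing coral bleaching and habitat loss".split()
chunks = [words[i:i + 5] for i in range(0, len(words), 5)]
duration = 4.0  # hypothetical narration length in seconds
chunk_duration = duration / len(chunks)
for i, chunk in enumerate(chunks):
    start = i * chunk_duration
    end = (i + 1) * chunk_duration if i < len(chunks) - 1 else duration
    print(f"{start:.1f}s-{end:.1f}s: {' '.join(chunk)}")
# 0.0s-2.0s: Rising temperatures are causing coral
# 2.0s-4.0s: bleaching and habitat loss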
 def fix_imagemagick_policy():
+    """Fix ImageMagick security policies for text rendering."""
     try:
         print("Attempting to fix ImageMagick security policies...")
         policy_paths = [
 ...
         ]
         found_policy = next((path for path in policy_paths if os.path.exists(path)), None)
         if not found_policy:
+            print("No policy.xml found. Text rendering may fail.")
             return False
         print(f"Modifying policy file at {found_policy}")
         os.system(f"sudo cp {found_policy} {found_policy}.bak")
         os.system(f"sudo sed -i 's/rights=\"none\"/rights=\"read|write\"/g' {found_policy}")
         os.system(f"sudo sed -i 's/<policy domain=\"path\" pattern=\"@\*\"[^>]*>/<policy domain=\"path\" pattern=\"@*\" rights=\"read|write\"/g' {found_policy}")
         print("ImageMagick policies updated successfully.")
         return True
     except Exception as e:
         print(f"Error fixing policies: {e}")
         return False
 
 def generate_video(user_input, resolution, caption_option):
+    """Generate a video using video.mp4 with synchronized voice and captions."""
     global TARGET_RESOLUTION, CAPTION_COLOR, TEMP_FOLDER
+    TARGET_RESOLUTION = (1080, 1920)  # Fixed as per requirement
     CAPTION_COLOR = "white" if caption_option == "Yes" else "transparent"
     TEMP_FOLDER = tempfile.mkdtemp()
+
     fix_success = fix_imagemagick_policy()
     if not fix_success:
+        print("Proceeding without ImageMagick policy fix.")
+
     print("Generating script from API...")
     script = generate_script(user_input)
     if not script:
 ...
         shutil.rmtree(TEMP_FOLDER)
         return None
     print("Generated Script:\n", script)
+
     elements = parse_script(script)
     if not elements:
         print("Failed to parse script into elements.")
         shutil.rmtree(TEMP_FOLDER)
         return None
+    print(f"Parsed {len(elements)} script segments.")
+
+    video_path = "video.mp4"
+    if not os.path.exists(video_path):
+        print("video.mp4 not found in the current directory.")
         shutil.rmtree(TEMP_FOLDER)
         return None
+
+    source_video = VideoFileClip(video_path)
+    total_duration = source_video.duration
+    source_video.close()
+    print(f"Source video duration: {total_duration:.2f} seconds")
+
     clips = []
+    for idx, tts_elem in enumerate(elements):
+        print(f"\nProcessing segment {idx+1}/{len(elements)}")
         tts_path = generate_tts(tts_elem['text'], tts_elem['voice'])
         if not tts_path:
+            print(f"Skipping segment {idx+1} due to TTS failure.")
             continue
+
+        audio_clip = AudioFileClip(tts_path)
+        segment_duration = audio_clip.duration
+        audio_clip.close()
+
+        max_start = total_duration - segment_duration
+        if max_start <= 0:
+            print(f"Segment duration {segment_duration:.2f}s exceeds video duration {total_duration:.2f}s.")
+            continue
+
+        start_time = random.uniform(0, max_start)
         clip = create_clip(
+            video_path=video_path,
+            start_time=start_time,
+            duration=segment_duration,
             tts_path=tts_path,
             narration_text=tts_elem['text'],
             segment_index=idx
         )
 ...
             clips.append(clip)
         else:
             print(f"Clip creation failed for segment {idx+1}.")
+
     if not clips:
         print("No clips were successfully created.")
         shutil.rmtree(TEMP_FOLDER)
         return None
+
     print("\nConcatenating clips...")
     final_video = concatenate_videoclips(clips, method="compose")
     final_video = add_background_music(final_video, bg_music_volume=bg_music_volume)
+
     print(f"Exporting final video to {OUTPUT_VIDEO_FILENAME}...")
     final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', fps=fps, preset=preset)
     print(f"Final video saved as {OUTPUT_VIDEO_FILENAME}")
+
     print("Cleaning up temporary files...")
     shutil.rmtree(TEMP_FOLDER)
     print("Temporary files removed.")
+
     return OUTPUT_VIDEO_FILENAME
 
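The per-segment sampling above picks a random window of video.mp4 exactly as long as the narration; a minimal sketch of that calculation (values hypothetical):

# Editor's sketch: the random subclip window used per segment in generate_video.
import random

total_duration = 120.0   # length of video.mp4, in seconds (hypothetical)
segment_duration = 7.5   # length of the TTS narration (hypothetical)
max_start = total_duration - segment_duration
start_time = random.uniform(0, max_start)
# The clip covers [start_time, start_time + segment_duration],
# so it always fits inside the source video.
print(f"window: {start_time:.2f}s to {start_time + segment_duration:.2f}s")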
+# Gradio Interface
 VOICE_CHOICES = {
     'Emma (Female)': 'af_heart',
     'Bella (Female)': 'af_bella',
 ...
 }
 
 def generate_video_with_options(user_input, resolution, caption_option, music_file, voice, vclip_prob, bg_vol, video_fps, video_preset, v_speed, caption_size):
+    """Wrapper function for Gradio interface to set global options."""
+    global selected_voice, voice_speed, font_size, bg_music_volume, fps, preset
     selected_voice = VOICE_CHOICES[voice]
     voice_speed = v_speed
     font_size = caption_size
     bg_music_volume = bg_vol
     fps = video_fps
     preset = video_preset
     if music_file is not None:
         target_path = "music.mp3"
         shutil.copy(music_file.name, target_path)
         print(f"Uploaded music saved as: {target_path}")
     return generate_video(user_input, resolution, caption_option)
 
 iface = gr.Interface(
     fn=generate_video_with_options,
     inputs=[
         gr.Textbox(label="Video Concept", placeholder="Enter your video concept here..."),
+        gr.Radio(["Full", "Short"], label="Resolution", value="Short", visible=False),  # Hidden, fixed to Short
+        gr.Radio(["Yes", "No"], label="Include Captions", value="No"),
         gr.File(label="Upload Background Music (MP3)", file_types=[".mp3"]),
         gr.Dropdown(choices=list(VOICE_CHOICES.keys()), label="Choose Voice", value="Emma (Female)"),
+        gr.Slider(0, 100, value=25, step=1, label="Video Clip Usage Probability (%)", visible=False),  # Unused
         gr.Slider(0.0, 1.0, value=0.08, step=0.01, label="Background Music Volume"),
         gr.Slider(10, 60, value=30, step=1, label="Video FPS"),
         gr.Dropdown(choices=["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow"],
                     value="veryfast", label="Export Preset"),
+        gr.Slider(0.5, 1.5, value=0.9, step=0.05, label="Voice Speed"),
         gr.Slider(20, 100, value=45, step=1, label="Caption Font Size")
     ],
     outputs=gr.Video(label="Generated Video"),
     title="AI Documentary Video Generator",
+    description="Create short documentary videos using video.mp4 with AI narration and synced captions."
 )
 
 if __name__ == "__main__":
     iface.launch(share=True)
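Finally, a hedged sketch of driving the generator without the Gradio UI — assuming video.mp4 (and optionally music.mp3) sits next to app.py and the module-level defaults are acceptable; this helper is hypothetical and not part of the commit:

# Editor's sketch (hypothetical usage): calling the pipeline headlessly
# with the default globals instead of launching the Gradio interface.
def run_headless(topic):
    # The new generate_video ignores the resolution argument (it is fixed
    # to 1080x1920); passing "No" disables captions.
    return generate_video(topic, "Short", "No")

# Example:
# output_path = run_headless("How glaciers shape valleys")
# print(output_path)  # "final_video.mp4" on success, None on failure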