testdeep123 committed
Commit fcc433b · verified · 1 Parent(s): b888295

Update app.py

Files changed (1)
  1. app.py +700 -906
app.py CHANGED
@@ -1,187 +1,245 @@
 
 
1
  # Import necessary libraries
2
- from kokoro import KPipeline # Assuming kokoro is installed and working
3
 
4
  import soundfile as sf
5
  import torch
 
 
6
  import os
 
 
7
  import tempfile
8
  import random
 
9
  import math
10
- import time
11
- import re
12
- import requests
13
- import io
14
- import shutil
15
- from urllib.parse import quote # Needed for search_google_images
16
- import numpy as np
17
- from bs4 import BeautifulSoup # Needed for search_google_images
18
- import base64
19
- from gtts import gTTS
20
- import gradio as gr
21
- from PIL import Image, ImageDraw, ImageFont
22
- import cv2 # OpenCV for image processing in Ken Burns
23
-
24
- # MoviePy imports
25
  from moviepy.editor import (
26
- VideoFileClip, AudioFileClip, ImageClip, CompositeVideoClip, TextClip,
27
- concatenate_videoclips, CompositeAudioClip
28
  )
 
 
 
29
  import moviepy.video.fx.all as vfx
30
-
31
- # Pydub imports
32
  from pydub import AudioSegment
33
  from pydub.generators import Sine
34
 
 
35
  # ---------------- Global Configuration ---------------- #
36
  PEXELS_API_KEY = 'BhJqbcdm9Vi90KqzXKAhnEHGsuFNv4irXuOjWtT761U49lRzo03qBGna'
37
  OPENROUTER_API_KEY = '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'
38
  OPENROUTER_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free"
39
  OUTPUT_VIDEO_FILENAME = "final_video.mp4"
40
- # --- Web Request Settings ---
41
  USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
42
 
43
- # --- Gradio Controlled Variables (with defaults) ---
44
- selected_voice = 'af_heart'
45
- voice_speed = 0.9
46
- font_size = 45
47
- video_clip_probability = 0.25 # Default 25%
48
- bg_music_volume = 0.08
49
- fps = 30
50
- preset = "veryfast"
51
- caption_style_bg_color = 'rgba(0, 0, 0, 0.6)'
52
- caption_style_text_color = 'yellow'
53
- caption_font = 'Arial-Bold'
54
-
55
- # --- Runtime Variables (set per execution) ---
56
  TARGET_RESOLUTION = None
 
57
  TEMP_FOLDER = None
58
- USE_CAPTIONS = True
59
 
60
- # ---------------- Kokoro TTS Initialization ---------------- #
61
- try:
62
- pipeline = KPipeline(lang_code='a')
63
- print("Kokoro TTS Pipeline initialized.")
64
- except Exception as e:
65
- print(f"Warning: Failed to initialize Kokoro TTS Pipeline: {e}")
66
- print("TTS generation will rely on gTTS.")
67
- pipeline = None
68
 
69
  # ---------------- Helper Functions ---------------- #
 
70
 
71
  def generate_script(user_input):
72
- """Generate documentary script using OpenRouter API."""
73
- # (Retained from previous versions)
74
  headers = {
75
  'Authorization': f'Bearer {OPENROUTER_API_KEY}',
76
- 'Content-Type': 'application/json',
77
- 'HTTP-Referer': 'http://localhost:7860', # Or your app's URL
78
- 'X-Title': 'AI Documentary Maker Gradio'
79
  }
80
- prompt = f"""Create a short, humorous, slightly negative, and conversational documentary-style script based on the following topic or instructions: '{user_input}'.
81
 
82
  Formatting Rules:
83
- 1. Start each distinct visual scene/idea with a title in square brackets `[Like This]`. This title will be used for searching visuals. Keep titles concise (1-3 words).
84
- 2. After the bracketed title, write 1-2 short sentences (5-15 words total) of narration for that scene.
85
- 3. Keep the narration casual, funny, maybe a bit sarcastic or critical, and human-like. Avoid sounding like a robotic AI.
86
- 4. Do NOT use any other formatting like bold, italics, or bullet points.
87
- 5. Ensure search terms in brackets are general enough for stock footage searches (e.g., use "[Technology]" instead of "[Quantum Supercomputer]").
88
- 6. End the *entire* script with a funny, topic-related call to subscribe, also enclosed in brackets like `[Subscribe CTA]`.
89
- 7. Focus on one core topic for the entire script.
90
- 8. Output *only* the formatted script, nothing else.
91
-
92
- Example:
93
- [Cats]
94
- So, you think cats are cute? Let's investigate.
95
- [Sleeping]
96
- They spend 90% of their lives asleep. Lazy, much?
97
- [Judgment]
98
- The other 10%? Judging your life choices. Harsh.
99
- [Boxes]
100
- Their obsession with boxes remains unexplained. Weirdos.
101
- [Subscribe CTA]
102
- Subscribe now, or a cat will knock your coffee over.
103
-
104
- Now generate the script based on: {user_input}
 
 
105
  """
 
106
  data = {
107
  'model': OPENROUTER_MODEL,
108
  'messages': [{'role': 'user', 'content': prompt}],
109
- 'temperature': 0.6,
110
- 'max_tokens': 600
111
  }
 
112
  try:
113
  response = requests.post(
114
  'https://openrouter.ai/api/v1/chat/completions',
115
  headers=headers,
116
  json=data,
117
- timeout=45
118
  )
119
- response.raise_for_status()
120
- response_data = response.json()
121
- if 'choices' in response_data and len(response_data['choices']) > 0:
122
- script_content = response_data['choices'][0]['message']['content']
123
- script_content = re.sub(r"^.*?\n?\[", "[", script_content, flags=re.DOTALL)
124
- script_content = script_content.strip()
125
- print("Script generated successfully.")
126
- return script_content
127
  else:
128
- print(f"API Error: Unexpected response format: {response_data}")
129
  return None
130
- except requests.exceptions.Timeout:
131
- print("API Error: Request timed out.")
132
- return None
133
- except requests.exceptions.RequestException as e:
134
- print(f"API Error: Request failed: {e}")
135
- if hasattr(e, 'response') and e.response is not None:
136
- print(f"API Response Status Code: {e.response.status_code}")
137
- print(f"API Response Text: {e.response.text}")
138
- return None
139
  except Exception as e:
140
- print(f"Error during script generation: {str(e)}")
141
  return None
142
 
143
  def parse_script(script_text):
144
- """Parse the generated script into segments."""
145
- # (Retained from previous versions)
146
- segments = []
 
147
  current_title = None
148
- current_narration = ""
149
- if not script_text:
150
- print("Error: Script text is empty.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
  return []
152
- lines = script_text.strip().splitlines()
153
- for line in lines:
154
- line = line.strip()
155
- if not line: continue
156
- title_match = re.match(r'^\[(.*?)\](.*)', line)
157
- if title_match:
158
- if current_title is not None and current_narration.strip():
159
- segments.append({"prompt": current_title, "narration": current_narration.strip()})
160
- current_title = title_match.group(1).strip()
161
- current_narration = title_match.group(2).strip() + " "
162
- elif current_title is not None:
163
- current_narration += line + " "
164
- if current_title is not None and current_narration.strip():
165
- segments.append({"prompt": current_title, "narration": current_narration.strip()})
166
- if not segments:
167
- print("Error: Could not parse any segments from the script.")
168
- simple_segments = []
169
- for i, line in enumerate(lines):
170
- if line.strip(): simple_segments.append({"prompt": f"Scene {i+1}", "narration": line.strip()})
171
- if simple_segments:
172
- print("Warning: Using simplified script parsing.")
173
- return simple_segments
174
- else: return []
175
- print(f"Parsed {len(segments)} segments from script.")
176
- return segments
177
-
178
- # --- Start: User Provided Functions ---
179
 
180
  def search_pexels_videos(query, pexels_api_key):
181
  """Search for a video on Pexels by query and return a random HD video."""
182
- if not pexels_api_key or pexels_api_key == 'YOUR_PEXELS_API_KEY':
183
- print(f"Pexels API key not provided or is default. Skipping Pexels video search.")
184
- return None
185
  headers = {'Authorization': pexels_api_key}
186
  base_url = "https://api.pexels.com/videos/search"
187
  num_pages = 3
@@ -189,990 +247,726 @@ def search_pexels_videos(query, pexels_api_key):
189
 
190
  max_retries = 3
191
  retry_delay = 1
192
- timeout_duration = 15 # Increased timeout slightly
193
 
194
  search_query = query
195
  all_videos = []
196
- print(f"Searching Pexels videos for '{query}' (up to {num_pages} pages)...")
197
 
198
  for page in range(1, num_pages + 1):
199
- print(f" Pexels Video Search: Page {page}")
200
  for attempt in range(max_retries):
201
  try:
202
- params = {"query": search_query, "per_page": videos_per_page, "page": page, "orientation": "landscape"}
203
- response = requests.get(base_url, headers=headers, params=params, timeout=timeout_duration)
204
 
205
  if response.status_code == 200:
206
  data = response.json()
207
  videos = data.get("videos", [])
208
 
209
  if not videos:
210
- print(f" No videos found on page {page} for '{query}'.")
211
- # Don't break inner loop immediately, maybe next page has results
212
- # Break outer loop if no videos found on *this* page attempt
213
- break # Break attempt loop for this page
214
 
215
- found_on_page = 0
216
  for video in videos:
217
  video_files = video.get("video_files", [])
218
- # Prioritize HD, then large, then medium
219
- hd_link = next((f['link'] for f in video_files if f.get('quality') == 'hd' and f.get('width', 0) >= 1080), None)
220
- large_link = next((f['link'] for f in video_files if f.get('quality') == 'large' and f.get('width', 0) >= 1080), None)
221
- medium_link = next((f['link'] for f in video_files if f.get('quality') == 'medium'), None) # Fallback if no HD/Large
222
- link_to_add = hd_link or large_link or medium_link
223
-
224
- if link_to_add:
225
- all_videos.append(link_to_add)
226
- found_on_page += 1
227
- # Don't break inner loop, collect all suitable videos from the page
228
-
229
- print(f" Found {found_on_page} suitable videos on page {page}.")
230
- break # Break attempt loop successfully after processing page
231
-
232
- elif response.status_code == 401:
233
- print(f" Pexels API Error: Unauthorized (401). Check your API Key.")
234
- return None # Stop searching if key is bad
235
  elif response.status_code == 429:
236
- print(f" Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
237
  time.sleep(retry_delay)
238
  retry_delay *= 2
239
- elif response.status_code == 522:
240
- print(f" Pexels API Error: Connection Timed Out (522) (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
241
- time.sleep(retry_delay)
242
- retry_delay *= 2
243
  else:
244
- print(f" Error fetching videos: {response.status_code} {response.text}")
245
  if attempt < max_retries - 1:
246
- print(f" Retrying in {retry_delay} seconds...")
247
  time.sleep(retry_delay)
248
  retry_delay *= 2
249
  else:
250
- print(f" Max retries reached for page {page}.")
251
- break # Break attempt loop for this page after max retries
252
 
253
- except requests.exceptions.Timeout:
254
- print(f" Request timed out (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
255
- time.sleep(retry_delay)
256
- retry_delay *= 2
257
  except requests.exceptions.RequestException as e:
258
- print(f" Request exception: {e}")
259
  if attempt < max_retries - 1:
260
- print(f" Retrying in {retry_delay} seconds...")
261
  time.sleep(retry_delay)
262
  retry_delay *= 2
263
  else:
264
- print(f" Max retries reached for page {page} due to request exception.")
265
- break # Break attempt loop for this page
266
-
267
- # Reset retry delay for the next page
268
- retry_delay = 1
269
 
270
  if all_videos:
271
  random_video = random.choice(all_videos)
272
- print(f"Selected random video from {len(all_videos)} suitable videos found across pages.")
273
  return random_video
274
  else:
275
- print(f"No suitable videos found for '{query}' after searching {num_pages} pages.")
276
  return None
277
 
278
  def search_pexels_images(query, pexels_api_key):
279
  """Search for an image on Pexels by query."""
280
- if not pexels_api_key or pexels_api_key == 'YOUR_PEXELS_API_KEY':
281
- print(f"Pexels API key not provided or is default. Skipping Pexels image search.")
282
- return None
283
  headers = {'Authorization': pexels_api_key}
284
  url = "https://api.pexels.com/v1/search"
285
- # Fetch more results to increase chance of finding good ones
286
- params = {"query": query, "per_page": 15, "orientation": "landscape"}
287
 
288
  max_retries = 3
289
  retry_delay = 1
290
- timeout_duration = 15 # Increased timeout slightly
291
-
292
- print(f"Searching Pexels images for '{query}'...")
293
 
294
  for attempt in range(max_retries):
295
  try:
296
- response = requests.get(url, headers=headers, params=params, timeout=timeout_duration)
297
 
298
  if response.status_code == 200:
299
  data = response.json()
300
  photos = data.get("photos", [])
301
  if photos:
302
- # Select from all returned photos, preferring larger sizes
303
- valid_photos = []
304
- for photo in photos:
305
- # Prefer large2x or original, fallback to large
306
- large2x_url = photo.get("src", {}).get("large2x")
307
- original_url = photo.get("src", {}).get("original")
308
- large_url = photo.get("src", {}).get("large")
309
- img_url = large2x_url or original_url or large_url
310
- if img_url:
311
- valid_photos.append(img_url)
312
-
313
- if valid_photos:
314
- selected_photo = random.choice(valid_photos)
315
- print(f"Selected random image from {len(valid_photos)} suitable images found.")
316
- return selected_photo
317
- else:
318
- print(f"No suitable image URLs found in results for query: {query}")
319
- return None # Found photos but no usable URLs
320
  else:
321
  print(f"No images found for query: {query}")
322
- return None # API returned empty 'photos' list
323
 
324
- elif response.status_code == 401:
325
- print(f" Pexels API Error: Unauthorized (401). Check your API Key.")
326
- return None # Stop searching if key is bad
327
  elif response.status_code == 429:
328
- print(f" Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
329
  time.sleep(retry_delay)
330
  retry_delay *= 2
331
- elif response.status_code == 522:
332
- print(f" Pexels API Error: Connection Timed Out (522) (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
333
- time.sleep(retry_delay)
334
- retry_delay *= 2
335
  else:
336
- print(f" Error fetching images: {response.status_code} {response.text}")
337
  if attempt < max_retries - 1:
338
- print(f" Retrying in {retry_delay} seconds...")
339
  time.sleep(retry_delay)
340
  retry_delay *= 2
341
- else:
342
- break # Max retries for other errors
343
 
344
- except requests.exceptions.Timeout:
345
- print(f" Request timed out (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
346
- time.sleep(retry_delay)
347
- retry_delay *= 2
348
  except requests.exceptions.RequestException as e:
349
- print(f" Request exception: {e}")
350
  if attempt < max_retries - 1:
351
- print(f" Retrying in {retry_delay} seconds...")
352
  time.sleep(retry_delay)
353
  retry_delay *= 2
354
- else:
355
- break # Max retries after request exception
356
 
357
  print(f"No Pexels images found for query: {query} after all attempts")
358
  return None
359
 
360
- # Added back search_google_images as it's used in the provided generate_media
361
  def search_google_images(query):
362
- """Search for images on Google Images (use sparingly and ethically)."""
363
- print(f"Attempting Google Image search for: {query} (Use with caution)")
364
  try:
365
- search_url = f"https://www.google.com/search?q={quote(query)}&tbm=isch&safe=active"
366
  headers = {"User-Agent": USER_AGENT}
367
  response = requests.get(search_url, headers=headers, timeout=10)
368
- response.raise_for_status()
369
  soup = BeautifulSoup(response.text, "html.parser")
370
 
371
  img_tags = soup.find_all("img")
372
  image_urls = []
373
  for img in img_tags:
374
- src = img.get("data-src") or img.get("src")
375
- if src and src.startswith("http") and not "gstatic" in src and not src.startswith("data:image"):
376
- if any(ext in src.lower() for ext in ['.jpg', '.jpeg', '.png', '.webp']):
377
- image_urls.append(src)
378
 
379
  if image_urls:
380
- print(f"Found {len(image_urls)} potential Google Images for '{query}'.")
381
- # Select randomly from top 10 potential URLs
382
- return random.choice(image_urls[:min(len(image_urls), 10)])
383
  else:
384
- print(f"No suitable Google Images found for query: {query}")
385
  return None
386
- except requests.exceptions.RequestException as e:
387
- print(f"Error during Google Images search request: {e}")
388
- return None
389
  except Exception as e:
390
- print(f"Error parsing Google Images search results: {e}")
391
  return None
392
 
393
-
394
  def download_image(image_url, filename):
395
  """Download an image from a URL to a local file with enhanced error handling."""
396
  try:
397
  headers = {"User-Agent": USER_AGENT}
398
  print(f"Downloading image from: {image_url} to {filename}")
399
- response = requests.get(image_url, headers=headers, stream=True, timeout=20) # Increased timeout
400
  response.raise_for_status()
401
 
402
- # Ensure the target directory exists
403
- os.makedirs(os.path.dirname(filename), exist_ok=True)
404
-
405
  with open(filename, 'wb') as f:
406
  for chunk in response.iter_content(chunk_size=8192):
407
  f.write(chunk)
408
 
409
- print(f" Image downloaded successfully to: {filename}")
410
 
411
- # Validate and convert image
412
  try:
413
  img = Image.open(filename)
414
- img.verify() # Check if it's a valid image file format
415
- img.close() # Close file handle after verify
416
-
417
- # Re-open to check mode and convert if needed
418
  img = Image.open(filename)
419
  if img.mode != 'RGB':
420
- print(f" Converting image {os.path.basename(filename)} to RGB.")
421
- # Ensure conversion doesn't create an empty file on error
422
- try:
423
- rgb_img = img.convert('RGB')
424
- # Save to a temporary file first, then replace original
425
- temp_filename = filename + ".tmp.jpg"
426
- rgb_img.save(temp_filename, "JPEG")
427
- rgb_img.close()
428
- img.close()
429
- os.replace(temp_filename, filename) # Atomic replace if possible
430
- print(f" Image successfully converted and saved as {os.path.basename(filename)}")
431
- except Exception as e_convert:
432
- print(f" Error converting image to RGB: {e_convert}")
433
- img.close() # Close original image handle
434
- if os.path.exists(filename): os.remove(filename) # Remove partially converted/original
435
- if os.path.exists(temp_filename): os.remove(temp_filename) # Clean up temp
436
- return None
437
- else:
438
- img.close() # Close if already RGB
439
- print(f" Image {os.path.basename(filename)} validated (already RGB).")
440
-
441
- # Final check if file exists after processing
442
- if os.path.exists(filename) and os.path.getsize(filename) > 0:
443
- return filename
444
- else:
445
- print(f" Image file {os.path.basename(filename)} missing or empty after processing.")
446
- return None
447
-
448
- except (IOError, SyntaxError, Image.UnidentifiedImageError) as e_validate:
449
- print(f" Downloaded file {os.path.basename(filename)} is not a valid image or corrupted: {e_validate}")
450
- if os.path.exists(filename): os.remove(filename)
451
  return None
452
- except Exception as e_general_process:
453
- print(f" Unexpected error during image processing: {e_general_process}")
454
- if os.path.exists(filename): os.remove(filename)
455
- return None
456
-
457
 
458
  except requests.exceptions.RequestException as e_download:
459
- print(f" Image download error: {e_download}")
460
- # Don't remove file here, might not exist or be partial
 
461
  return None
462
- except Exception as e_general_download:
463
- print(f" General error during image download request: {e_general_download}")
 
 
464
  return None
465
 
466
  def download_video(video_url, filename):
467
  """Download a video from a URL to a local file."""
468
  try:
469
- headers = {"User-Agent": USER_AGENT} # Add User-Agent
470
- print(f"Downloading video from: {video_url} to {filename}")
471
- response = requests.get(video_url, headers=headers, stream=True, timeout=60) # Longer timeout for videos
472
  response.raise_for_status()
473
-
474
- # Ensure the target directory exists
475
- os.makedirs(os.path.dirname(filename), exist_ok=True)
476
-
477
  with open(filename, 'wb') as f:
478
- for chunk in response.iter_content(chunk_size=1024*1024): # Larger chunk size for videos
479
  f.write(chunk)
480
- print(f" Video downloaded successfully to: {filename}")
481
- # Basic check: file exists and has size > 0
482
- if os.path.exists(filename) and os.path.getsize(filename) > 0:
483
- return filename
484
- else:
485
- print(f" Video file {os.path.basename(filename)} missing or empty after download.")
486
- if os.path.exists(filename): os.remove(filename) # Clean up empty file
487
- return None
488
- except requests.exceptions.RequestException as e:
489
  print(f"Video download error: {e}")
490
- # Don't remove file here, might not exist or be partial
 
491
  return None
492
- except Exception as e_general:
493
- print(f"General error during video download: {e_general}")
494
- return None
495
 
496
  def generate_media(prompt, user_image=None, current_index=0, total_segments=1):
497
  """
498
- Generate a visual asset using user-provided functions.
499
- Checks for "news", tries Pexels video/image, falls back to generic Pexels image.
 
500
  """
501
- # Sanitize prompt for filename
502
- safe_prompt_base = re.sub(r'[^\w\s-]', '', prompt).strip().replace(' ', '_')
503
- safe_prompt_base = safe_prompt_base[:50] # Limit length
504
- # Add timestamp for uniqueness within the temp folder
505
- safe_prompt = f"{safe_prompt_base}_{int(time.time())}"
506
 
507
- # --- News Check (Uses Google Images) ---
508
  if "news" in prompt.lower():
509
- print(f"News-related query detected: '{prompt}'. Trying Google Images...")
510
  image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_news.jpg")
511
- image_url = search_google_images(prompt) # Use the added back function
512
  if image_url:
513
  downloaded_image = download_image(image_url, image_file)
514
  if downloaded_image:
515
- print(f" Using Google Image news asset: {os.path.basename(downloaded_image)}")
516
  return {"path": downloaded_image, "asset_type": "image"}
517
- else:
518
- print(f" Google Image download failed for: {image_url}")
519
  else:
520
- print(f" Google Images search failed for prompt: '{prompt}'. Proceeding with Pexels.")
521
- # Fall through to Pexels if Google fails
522
 
523
- # --- Pexels Video Attempt ---
524
  if random.random() < video_clip_probability:
525
- print(f"Attempting Pexels video search for: '{prompt}'")
526
  video_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_video.mp4")
527
  video_url = search_pexels_videos(prompt, PEXELS_API_KEY)
528
  if video_url:
529
  downloaded_video = download_video(video_url, video_file)
530
  if downloaded_video:
531
- print(f" Using Pexels video asset: {os.path.basename(downloaded_video)}")
532
  return {"path": downloaded_video, "asset_type": "video"}
533
- else:
534
- print(f" Pexels video download failed for: {video_url}")
535
  else:
536
- print(f" Pexels video search yielded no results for '{prompt}'.")
537
- else:
538
- print(f"Skipping Pexels video search based on probability for '{prompt}'.")
539
 
540
-
541
- # --- Pexels Image Attempt ---
542
- print(f"Attempting Pexels image search for: '{prompt}'")
543
- image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_image.jpg")
544
  image_url = search_pexels_images(prompt, PEXELS_API_KEY)
545
  if image_url:
546
  downloaded_image = download_image(image_url, image_file)
547
  if downloaded_image:
548
- print(f" Using Pexels image asset: {os.path.basename(downloaded_image)}")
549
  return {"path": downloaded_image, "asset_type": "image"}
550
  else:
551
- print(f" Pexels image download failed for: {image_url}")
552
- else:
553
- print(f" Pexels image search yielded no results for '{prompt}'.")
554
-
555
- # --- Fallback Pexels Image Attempt ---
556
- # Avoid fallback for instructional prompts
557
- if "subscribe" not in prompt.lower() and "cta" not in prompt.lower():
558
- fallback_terms = ["technology", "abstract", "nature", "background", "texture"]
559
- term = random.choice(fallback_terms)
560
- print(f"All specific searches failed for '{prompt}'. Trying Pexels fallback term: '{term}'")
561
- fallback_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_fallback_{term}.jpg")
562
  fallback_url = search_pexels_images(term, PEXELS_API_KEY)
563
  if fallback_url:
564
  downloaded_fallback = download_image(fallback_url, fallback_file)
565
  if downloaded_fallback:
566
- print(f" Using Pexels fallback image asset: {os.path.basename(downloaded_fallback)}")
567
  return {"path": downloaded_fallback, "asset_type": "image"}
568
  else:
569
- print(f" Pexels fallback image download failed for term: '{term}'")
570
  else:
571
- print(f" Pexels fallback image search failed for term: '{term}'")
572
- else:
573
- print(f"Skipping fallback search for instructional prompt: '{prompt}'")
574
-
575
 
576
- # --- Final Failure ---
577
- print(f"FATAL: Failed to generate any visual asset for prompt: '{prompt}'")
578
  return None
579
 
580
- # --- End: User Provided Functions ---
581
 
 
582
 
583
- def generate_tts(text, voice_id, speed):
584
- """Generate TTS audio using Kokoro, falling back to gTTS."""
585
- # (Retained from previous versions)
586
- safe_text_prefix = re.sub(r'[^\w\s-]', '', text[:20]).strip().replace(' ', '_')
587
- output_filename = os.path.join(TEMP_FOLDER, f"tts_{safe_text_prefix}_{voice_id}.wav")
588
- if pipeline:
589
- try:
590
- print(f"Generating TTS with Kokoro (Voice: {voice_id}, Speed: {speed}) for: '{text[:30]}...'")
591
- generator = pipeline(text, voice=voice_id, speed=speed)
592
- audio_segments = []
593
- for item in generator:
594
- if isinstance(item, tuple) and len(item) > 0 and isinstance(item[-1], np.ndarray):
595
- audio_segments.append(item[-1])
596
- elif isinstance(item, np.ndarray):
597
- audio_segments.append(item)
598
- if not audio_segments: raise ValueError("Kokoro TTS returned no audio segments.")
599
- full_audio = np.concatenate(audio_segments) if len(audio_segments) > 0 else audio_segments[0]
600
- if full_audio.dtype != np.float32:
601
- full_audio = full_audio.astype(np.float32)
602
- max_val = np.max(np.abs(full_audio))
603
- if max_val > 1.0: full_audio /= max_val
604
- sf.write(output_filename, full_audio, 24000)
605
- print(f"Kokoro TTS audio saved to {output_filename}")
606
- return output_filename
607
- except Exception as e:
608
- print(f"Error with Kokoro TTS: {e}. Falling back to gTTS.")
609
  try:
610
- print(f"Generating TTS with gTTS for: '{text[:30]}...'")
611
- tts = gTTS(text=text, lang='en', slow= (speed < 0.9) )
612
- mp3_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text_prefix}_gtts.mp3")
613
- wav_path = output_filename
614
- tts.save(mp3_path)
615
- audio = AudioSegment.from_mp3(mp3_path)
616
- # Ensure target directory exists for export
617
- os.makedirs(os.path.dirname(wav_path), exist_ok=True)
618
- audio.export(wav_path, format="wav")
619
- os.remove(mp3_path)
620
- print(f"gTTS audio saved and converted to {wav_path}")
621
- # Check if file exists and has size
622
- if os.path.exists(wav_path) and os.path.getsize(wav_path) > 0:
623
- return wav_path
624
- else:
625
- print(f"Error: gTTS output file missing or empty: {wav_path}")
626
- return None
627
- except ImportError:
628
- print("Error: gTTS or pydub might not be installed. Cannot use gTTS fallback.")
629
- return None
630
- except Exception as fallback_error:
631
- print(f"Error with gTTS fallback: {fallback_error}")
632
- return None
 
 
 
 
 
 
 
633
 
634
- def apply_kenburns_effect(clip, target_resolution, duration):
635
- """Apply a randomized Ken Burns effect (zoom/pan) to an image clip."""
636
- # (Retained from previous versions)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
637
  target_w, target_h = target_resolution
638
- try: # Add try-except around accessing clip properties
639
- img_w, img_h = clip.size
640
- except Exception as e:
641
- print(f"Error accessing image clip size: {e}")
642
- # Cannot apply effect if size is unknown, return original clip resized
643
- return clip.resize(newsize=target_resolution).set_duration(duration)
644
-
645
- scale_factor = 1.2
646
- # Handle potential division by zero if img_h is 0
647
- if img_h == 0: img_h = 1
648
- img_aspect = img_w / img_h
649
  target_aspect = target_w / target_h
650
 
651
- # Resize logic
652
- if img_aspect > target_aspect:
653
- final_h = target_h * scale_factor
654
- final_w = final_h * img_aspect
655
  else:
656
- final_w = target_w * scale_factor
657
- final_h = final_w / img_aspect # Use img_aspect here
 
 
 
 
 
 
 
 
658
 
659
- final_w, final_h = int(final_w), int(final_h)
660
- # Ensure final dimensions are not zero
661
- if final_w <= 0: final_w = 1
662
- if final_h <= 0: final_h = 1
 
 
 
 
663
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
664
  try:
665
- # Use MoviePy's resize for ImageClip directly before applying fl
666
- resized_clip = clip.resize(newsize=(final_w, final_h)).set_duration(duration)
667
- # pil_img = Image.fromarray(clip.get_frame(0)) # Avoid get_frame here
668
- # resized_pil = pil_img.resize((final_w, final_h), Image.Resampling.LANCZOS)
669
- # resized_clip = ImageClip(np.array(resized_pil)).set_duration(duration)
 
 
 
 
 
 
 
 
 
 
 
 
 
670
  except Exception as e:
671
- print(f"Warning: Error during resize for Ken Burns, using MoviePy default: {e}")
672
- try:
673
- # Fallback resize
674
- resized_clip = clip.resize(newsize=(final_w, final_h)).set_duration(duration)
675
- except Exception as resize_err:
676
- print(f"FATAL error during fallback resize for Ken Burns: {resize_err}")
677
- # Return original clip resized to target as last resort
678
- return clip.resize(newsize=target_resolution).set_duration(duration)
679
-
680
-
681
- max_move_x = final_w - target_w
682
- max_move_y = final_h - target_h
683
- # Ensure max_move is not negative if resizing failed slightly
684
- max_move_x = max(0, max_move_x)
685
- max_move_y = max(0, max_move_y)
686
-
687
- effect = random.choice(['zoom_in', 'zoom_out', 'pan_lr', 'pan_rl', 'pan_td', 'pan_dt'])
688
- if effect == 'zoom_in': zoom_start, zoom_end = 1.0, scale_factor; x_start, x_end = max_move_x / 2, max_move_x / 2; y_start, y_end = max_move_y / 2, max_move_y / 2
689
- elif effect == 'zoom_out': zoom_start, zoom_end = scale_factor, 1.0; x_start, x_end = max_move_x / 2, max_move_x / 2; y_start, y_end = max_move_y / 2, max_move_y / 2
690
- elif effect == 'pan_lr': zoom_start, zoom_end = scale_factor, scale_factor; x_start, x_end = 0, max_move_x; y_start, y_end = max_move_y / 2, max_move_y / 2
691
- elif effect == 'pan_rl': zoom_start, zoom_end = scale_factor, scale_factor; x_start, x_end = max_move_x, 0; y_start, y_end = max_move_y / 2, max_move_y / 2
692
- elif effect == 'pan_td': zoom_start, zoom_end = scale_factor, scale_factor; x_start, x_end = max_move_x / 2, max_move_x / 2; y_start, y_end = 0, max_move_y
693
- else: zoom_start, zoom_end = scale_factor, scale_factor; x_start, x_end = max_move_x / 2, max_move_x / 2; y_start, y_end = max_move_y, 0
694
-
695
- def make_frame(t):
696
- # Protect against duration being zero
697
- interp = t / duration if duration > 0 else 0
698
-
699
- current_zoom = zoom_start + (zoom_end - zoom_start) * interp
700
- current_x = x_start + (x_end - x_start) * interp
701
- current_y = y_start + (y_end - y_start) * interp
702
-
703
- # Avoid division by zero for zoom
704
- if current_zoom <= 0: current_zoom = 1e-6 # Small positive number
705
-
706
- # Calculate crop box dimensions based on current zoom relative to the base scale_factor
707
- crop_w = target_w / (current_zoom / scale_factor)
708
- crop_h = target_h / (current_zoom / scale_factor)
709
- crop_w = max(1, int(crop_w)); crop_h = max(1, int(crop_h))
710
-
711
- # Calculate top-left corner (x1, y1)
712
- x1 = current_x; y1 = current_y
713
- x1 = max(0, min(x1, final_w - crop_w)); y1 = max(0, min(y1, final_h - crop_h))
714
-
715
- # Get frame from the *resized* clip
716
- frame = resized_clip.get_frame(t)
717
-
718
- # Crop using numpy slicing
719
- try:
720
- # Ensure indices are integers and within bounds
721
- y1_int, y2_int = int(y1), int(y1 + crop_h)
722
- x1_int, x2_int = int(x1), int(x1 + crop_w)
723
- # Clamp coordinates to frame dimensions BEFORE slicing
724
- y1_int = max(0, min(y1_int, frame.shape[0] - 1))
725
- y2_int = max(y1_int + 1, min(y2_int, frame.shape[0])) # Ensure y2 > y1
726
- x1_int = max(0, min(x1_int, frame.shape[1] - 1))
727
- x2_int = max(x1_int + 1, min(x2_int, frame.shape[1])) # Ensure x2 > x1
728
 
729
- # Check if dimensions are valid after clamping
730
- if y2_int <= y1_int or x2_int <= x1_int:
731
- print(f"Warning: Invalid crop dimensions after clamping ({y1_int}:{y2_int}, {x1_int}:{x2_int}). Returning uncropped frame.")
732
- # Resize original frame to target as fallback for this frame
733
- return cv2.resize(frame, (target_w, target_h), interpolation=cv2.INTER_AREA)
734
 
735
 
736
- cropped_frame = frame[y1_int:y2_int, x1_int:x2_int]
737
 
738
- # Check if cropped frame is empty
739
- if cropped_frame.size == 0:
740
- print(f"Warning: Cropped frame is empty ({y1_int}:{y2_int}, {x1_int}:{x2_int}). Returning uncropped frame.")
741
- return cv2.resize(frame, (target_w, target_h), interpolation=cv2.INTER_AREA)
742
 
743
 
744
- # Resize cropped frame to target
745
- final_frame = cv2.resize(cropped_frame, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
746
- except IndexError as ie:
747
- print(f"Error during frame cropping/resizing: {ie}. Frame shape: {frame.shape}, Crop: y={y1_int}:{y2_int}, x={x1_int}:{x2_int}")
748
- # Fallback: return the original frame resized
749
- final_frame = cv2.resize(frame, (target_w, target_h), interpolation=cv2.INTER_AREA)
750
- except Exception as crop_resize_err:
751
- print(f"Unexpected error during frame cropping/resizing: {crop_resize_err}")
752
- final_frame = cv2.resize(frame, (target_w, target_h), interpolation=cv2.INTER_AREA)
753
 
754
 
755
- return final_frame
756
 
757
- # Apply the transformation using fl
758
- try:
759
- return resized_clip.fl(make_frame, apply_to=['mask'])
760
- except Exception as fl_err:
761
- print(f"Error applying Ken Burns effect via fl: {fl_err}")
762
- # Return the resized clip without the effect as fallback
763
- return resized_clip.resize(newsize=target_resolution)
764
 
765
 
766
- def resize_to_fill(clip, target_resolution):
767
- """Resize and crop a video clip to fill the target resolution."""
768
- # (Retained from previous versions)
769
- target_w, target_h = target_resolution
770
- try:
771
- # Ensure clip dimensions are valid
772
- if clip.w <= 0 or clip.h <= 0:
773
- print(f"Warning: Invalid clip dimensions ({clip.w}x{clip.h}). Cannot resize/crop.")
774
- # Return a black clip of target size? Or fail? Let's return black.
775
- from moviepy.editor import ColorClip
776
- return ColorClip(size=target_resolution, color=(0,0,0), duration=clip.duration)
777
 
778
- target_aspect = target_w / target_h
779
- clip_aspect = clip.w / clip.h
780
 
781
- if clip_aspect > target_aspect: resized_clip = clip.resize(height=target_h)
782
- else: resized_clip = clip.resize(width=target_w)
783
 
784
- # Ensure resized dimensions are valid
785
- if resized_clip.w <= 0 or resized_clip.h <= 0:
786
- print(f"Warning: Invalid resized clip dimensions ({resized_clip.w}x{resized_clip.h}).")
787
- from moviepy.editor import ColorClip
788
- return ColorClip(size=target_resolution, color=(0,0,0), duration=clip.duration)
789
 
790
 
791
- crop_x = max(0, (resized_clip.w - target_w) / 2)
792
- crop_y = max(0, (resized_clip.h - target_h) / 2)
793
 
794
- # Use integer coordinates for cropping
795
- cropped_clip = resized_clip.crop(x1=int(crop_x), y1=int(crop_y), width=target_w, height=target_h)
796
- return cropped_clip
797
- except Exception as e:
798
- print(f"Error during resize_to_fill: {e}")
799
- # Fallback: try simple resize to target resolution
800
- try:
801
- return clip.resize(newsize=target_resolution)
802
- except Exception as fallback_e:
803
- print(f"Fallback resize also failed: {fallback_e}")
804
- # Last resort: return black clip
805
- from moviepy.editor import ColorClip
806
- return ColorClip(size=target_resolution, color=(0,0,0), duration=clip.duration)
807
-
808
-
809
- def add_background_music(video_clip, music_file_path, volume):
810
- """Add background music, looping if necessary."""
811
- # (Retained from previous versions)
812
- if not music_file_path or not os.path.exists(music_file_path):
813
- print("No background music file found or provided. Skipping.")
814
- return video_clip
815
- try:
816
- print(f"Adding background music from: {music_file_path}")
817
- bg_music = AudioFileClip(music_file_path)
818
- # Check for valid duration
819
- if not bg_music or bg_music.duration is None or bg_music.duration <= 0:
820
- print("Warning: Background music file is invalid or has zero duration. Skipping.")
821
- return video_clip
822
- if not video_clip or video_clip.duration is None or video_clip.duration <=0:
823
- print("Warning: Video clip has invalid duration. Cannot add music.")
824
- # Return original clip without audio modification
825
- return video_clip
826
-
827
-
828
- if bg_music.duration > video_clip.duration: bg_music = bg_music.subclip(0, video_clip.duration)
829
- elif bg_music.duration < video_clip.duration:
830
- loops_needed = math.ceil(video_clip.duration / bg_music.duration)
831
- # Ensure bg_music can be concatenated
832
- try:
833
- bg_music = concatenate_audioclips([bg_music] * loops_needed)
834
- bg_music = bg_music.subclip(0, video_clip.duration)
835
- except Exception as concat_err:
836
- print(f"Error looping background music: {concat_err}. Skipping music.")
837
- return video_clip # Return original video if looping fails
838
-
839
- bg_music = bg_music.volumex(volume)
840
-
841
- if video_clip.audio:
842
- # Ensure video audio is valid
843
- if video_clip.audio.duration is None or video_clip.audio.duration <= 0:
844
- print("Warning: Video clip audio has invalid duration. Replacing with background music.")
845
- final_audio = bg_music
846
- else:
847
- # Make durations match exactly before composing
848
- try:
849
- video_audio_adjusted = video_clip.audio.set_duration(video_clip.duration)
850
- bg_music_adjusted = bg_music.set_duration(video_clip.duration)
851
- final_audio = CompositeAudioClip([video_audio_adjusted, bg_music_adjusted])
852
- except Exception as audio_comp_err:
853
- print(f"Error composing audio clips: {audio_comp_err}. Skipping music.")
854
- return video_clip # Return original video
855
- else:
856
- final_audio = bg_music # If original clip has no audio
857
 
858
- video_clip = video_clip.set_audio(final_audio)
859
- print("Background music added successfully.")
860
- return video_clip
861
- except Exception as e:
862
- print(f"Error adding background music: {e}. Skipping.")
863
- return video_clip
864
 
865
- def create_segment_clip(media_info, tts_path, narration_text):
866
- """Create a single video segment (clip) with visuals, audio, and subtitles."""
867
- # (Retained from previous versions, with minor stability checks)
868
- try:
869
- media_path = media_info['path']
870
- asset_type = media_info['asset_type']
871
- print(f"Creating clip segment: Type={asset_type}, Media={os.path.basename(media_path)}")
872
- if not os.path.exists(tts_path) or os.path.getsize(tts_path) == 0:
873
- print(f"Error: TTS file missing or empty: {tts_path}"); return None
874
- audio_clip = AudioFileClip(tts_path)
875
- if audio_clip.duration is None or audio_clip.duration <= 0:
876
- print(f"Error: Audio clip has invalid duration: {tts_path}"); return None
877
- segment_duration = audio_clip.duration + 0.3
878
 
879
- if asset_type == "video":
880
- if not os.path.exists(media_path) or os.path.getsize(media_path) == 0:
881
- print(f"Error: Video file missing or empty: {media_path}"); return None
882
- try:
883
- video_clip = VideoFileClip(media_path)
884
- if video_clip.duration is None or video_clip.duration <= 0:
885
- print(f"Error: Video clip has invalid duration: {media_path}")
886
- video_clip.close() # Close the clip
887
- return None
888
- if video_clip.duration < segment_duration:
889
- if video_clip.duration > 0:
890
- loops = math.ceil(segment_duration / video_clip.duration)
891
- video_clip_looped = concatenate_videoclips([video_clip] * loops)
892
- video_clip.close() # Close original after looping
893
- video_clip = video_clip_looped
894
- else: # Should have been caught above, but double check
895
- print(f"Error: Cannot loop zero-duration video: {media_path}")
896
- video_clip.close(); return None
897
-
898
- # Ensure subclip duration is valid
899
- video_clip_final = video_clip.subclip(0, min(segment_duration, video_clip.duration))
900
- video_clip.close() # Close intermediate clip if looped
901
- video_clip = video_clip_final
902
-
903
- visual_clip = resize_to_fill(video_clip, TARGET_RESOLUTION)
904
- video_clip.close() # Close after resize_to_fill finishes
905
-
906
- except Exception as video_load_err:
907
- print(f"Error loading or processing video {media_path}: {video_load_err}")
908
- return None
909
 
910
- elif asset_type == "image":
911
- if not os.path.exists(media_path) or os.path.getsize(media_path) == 0:
912
- print(f"Error: Image file missing or empty: {media_path}"); return None
913
- try:
914
- # Load image clip
915
- img_clip = ImageClip(media_path).set_duration(segment_duration)
916
- # Apply Ken Burns
917
- visual_clip = apply_kenburns_effect(img_clip, TARGET_RESOLUTION, segment_duration)
918
- # Ensure final size - Ken Burns should handle this, but double check
919
- visual_clip = visual_clip.resize(newsize=TARGET_RESOLUTION)
920
- # Close the base image clip resource if possible (ImageClip doesn't have explicit close)
921
- except Exception as img_load_err:
922
- print(f"Error loading or processing image {media_path}: {img_load_err}")
923
- return None
924
- else: print(f"Error: Unknown asset type: {asset_type}"); return None
925
-
926
- visual_clip = visual_clip.fadein(0.15).fadeout(0.15)
927
- subtitle_clips = []
928
- if USE_CAPTIONS and narration_text:
929
- words = narration_text.split()
930
- max_words_per_chunk = 5; chunks = []; current_chunk = []
931
- for word in words:
932
- current_chunk.append(word)
933
- if len(current_chunk) >= max_words_per_chunk: chunks.append(" ".join(current_chunk)); current_chunk = []
934
- if current_chunk: chunks.append(" ".join(current_chunk))
935
- if not chunks: print("Warning: Narration text is empty, skipping subtitles.")
936
- else:
937
- num_chunks = len(chunks); chunk_duration = audio_clip.duration / num_chunks
938
- start_time = 0.1
939
- for i, chunk_text in enumerate(chunks):
940
- # Ensure chunk end time doesn't exceed visual clip duration
941
- chunk_end_time = min(start_time + chunk_duration, visual_clip.duration - 0.05) # Leave tiny buffer
942
- actual_chunk_duration = max(0.1, chunk_end_time - start_time) # Min duration 0.1s
943
-
944
- try:
945
- txt_clip = TextClip(txt=chunk_text, fontsize=font_size, font=caption_font, color=caption_style_text_color,
946
- bg_color=caption_style_bg_color, method='label', align='center',
947
- size=(TARGET_RESOLUTION[0] * 0.8, None))
948
- txt_clip = txt_clip.set_position(('center', TARGET_RESOLUTION[1] * 0.80))
949
- txt_clip = txt_clip.set_start(start_time).set_duration(actual_chunk_duration)
950
- subtitle_clips.append(txt_clip)
951
- start_time = chunk_end_time # Next chunk starts where the last one ended
952
- except Exception as txt_err:
953
- print(f"ERROR creating TextClip for '{chunk_text}': {txt_err}. Skipping subtitle chunk.")
954
-
955
- final_clip = CompositeVideoClip([visual_clip] + subtitle_clips, size=TARGET_RESOLUTION) if subtitle_clips else visual_clip
956
- # Set audio with slight offset
957
- final_clip = final_clip.set_audio(audio_clip.set_start(0.15).set_duration(audio_clip.duration))
958
- # Ensure final clip duration matches segment duration closely
959
- final_clip = final_clip.set_duration(segment_duration)
960
-
961
- print(f"Clip segment created successfully. Duration: {final_clip.duration:.2f}s")
962
- # Explicitly close audio clip resource
963
- audio_clip.close()
964
- # Close visual clip resources if they have close methods (VideoClips do)
965
- if hasattr(visual_clip, 'close'):
966
- visual_clip.close()
967
- for sub in subtitle_clips:
968
- if hasattr(sub, 'close'): # TextClips might not have close
969
- sub.close()
970
-
971
- return final_clip
972
- except Exception as e:
973
- print(f"Error creating clip segment: {e}")
974
- import traceback
975
- traceback.print_exc()
976
- # Clean up resources if possible on error
977
- if 'audio_clip' in locals() and hasattr(audio_clip, 'close'): audio_clip.close()
978
- if 'visual_clip' in locals() and hasattr(visual_clip, 'close'): visual_clip.close()
979
- if 'video_clip' in locals() and hasattr(video_clip, 'close'): video_clip.close()
980
- return None
981
 
982
 
983
  # ---------------- Main Video Generation Function ---------------- #
 
 
 
984
 
985
- def generate_full_video(user_input, resolution_choice, caption_choice, music_file_info):
986
- """Main function orchestrating the video generation process."""
987
- # (Retained from previous versions - logic relies on the new generate_media)
988
- global TARGET_RESOLUTION, TEMP_FOLDER, USE_CAPTIONS
989
- print("\n--- Starting Video Generation ---"); start_time = time.time()
990
- if resolution_choice == "Short (9:16)": TARGET_RESOLUTION = (1080, 1920); print("Resolution set to: Short (1080x1920)")
991
- else: TARGET_RESOLUTION = (1920, 1080); print("Resolution set to: Full HD (1920x1080)")
992
- USE_CAPTIONS = (caption_choice == "Yes"); print(f"Captions Enabled: {USE_CAPTIONS}")
993
- TEMP_FOLDER = tempfile.mkdtemp(prefix="aivideo_"); print(f"Temporary folder created: {TEMP_FOLDER}")
994
- music_file_path = None
995
- if music_file_info is not None:
996
- try:
997
- music_file_path = os.path.join(TEMP_FOLDER, "background_music.mp3")
998
- # Ensure temp folder exists before copying
999
- os.makedirs(TEMP_FOLDER, exist_ok=True)
1000
- shutil.copy(music_file_info.name, music_file_path)
1001
- print(f"Background music copied to: {music_file_path}")
1002
- except Exception as e: print(f"Error handling uploaded music file: {e}"); music_file_path = None
1003
-
1004
- print("\nStep 1: Generating script..."); script_text = generate_script(user_input)
1005
- if not script_text: print("ERROR: Failed to generate script. Aborting."); shutil.rmtree(TEMP_FOLDER); return None, "Error: Script generation failed."
1006
- print("Script Generated:\n", script_text)
1007
-
1008
- print("\nStep 2: Parsing script..."); segments = parse_script(script_text)
1009
- if not segments: print("ERROR: Failed to parse script. Aborting."); shutil.rmtree(TEMP_FOLDER); return None, "Error: Script parsing failed."
1010
- print(f"Successfully parsed {len(segments)} segments.")
1011
-
1012
- print("\nStep 3: Generating media and TTS for each segment...")
1013
- segment_clips = []; total_segments = len(segments)
1014
- for i, segment in enumerate(segments):
1015
- print(f"\n--- Processing Segment {i+1}/{total_segments} ---")
1016
- print(f" Prompt: {segment['prompt']}")
1017
- print(f" Narration: {segment['narration']}")
1018
-
1019
- media_info = generate_media(segment['prompt']) # Using user's generate_media
1020
- if not media_info:
1021
- print(f"Warning: Failed to get media for segment {i+1} ('{segment['prompt']}'). Skipping this segment.")
1022
- continue
1023
 
1024
- tts_path = generate_tts(segment['narration'], selected_voice, voice_speed)
 
 
 
 
 
 
 
1025
  if not tts_path:
1026
- print(f"Warning: Failed to generate TTS for segment {i+1}. Skipping segment.")
1027
- if media_info and os.path.exists(media_info['path']):
1028
- try: os.remove(media_info['path']); print(f"Cleaned up unused media: {media_info['path']}")
1029
- except OSError as e: print(f"Error removing unused media {media_info['path']}: {e}")
1030
  continue
1031
-
1032
- # Pass current_index=i to create_segment if needed by that function (currently not)
1033
- clip = create_segment_clip(media_info, tts_path, segment['narration'])
 
 
 
 
 
 
1034
  if clip:
1035
- segment_clips.append(clip)
1036
  else:
1037
- print(f"Warning: Failed to create video clip for segment {i+1}. Skipping.")
1038
- # Clean up files for this failed segment
1039
- if media_info and os.path.exists(media_info['path']):
1040
- try: os.remove(media_info['path']); print(f"Cleaned up media for failed clip: {media_info['path']}")
1041
- except OSError as e: print(f"Error removing media for failed clip {media_info['path']}: {e}")
1042
- if tts_path and os.path.exists(tts_path):
1043
- try: os.remove(tts_path); print(f"Cleaned up TTS for failed clip: {tts_path}")
1044
- except OSError as e: print(f"Error removing TTS for failed clip {tts_path}: {e}")
1045
-
1046
-
1047
- if not segment_clips:
1048
- print("ERROR: No video clips were successfully created. Aborting.")
1049
- try: shutil.rmtree(TEMP_FOLDER)
1050
- except Exception as e: print(f"Error removing temp folder during abort: {e}")
1051
- return None, "Error: Failed to create any video segments. Check logs for media/TTS issues."
1052
-
1053
- print("\nStep 4: Concatenating video segments...");
1054
- final_video = None # Initialize to None
1055
- try:
1056
- valid_clips = [c for c in segment_clips if c is not None]
1057
- if not valid_clips: raise ValueError("No valid clips remained after processing.")
1058
- # Concatenate
1059
- final_video = concatenate_videoclips(valid_clips, method="compose")
1060
- print("Segments concatenated successfully.")
1061
- # Close individual clips after concatenation
1062
- for clip in valid_clips:
1063
- if hasattr(clip, 'close'): clip.close()
1064
-
1065
- except Exception as e:
1066
- print(f"ERROR: Failed to concatenate video clips: {e}");
1067
- # Clean up individual clips if concatenation failed
1068
- for clip in segment_clips:
1069
- if clip and hasattr(clip, 'close'): clip.close()
1070
- try: shutil.rmtree(TEMP_FOLDER)
1071
- except Exception as e_clean: print(f"Error removing temp folder after concat error: {e_clean}")
1072
- return None, f"Error: Concatenation failed: {e}"
1073
-
1074
-
1075
- print("\nStep 5: Adding background music...");
1076
- if final_video: # Only add music if concatenation was successful
1077
- final_video = add_background_music(final_video, music_file_path, bg_music_volume)
1078
- else:
1079
- print("Skipping background music as concatenation failed.")
1080
-
1081
 
1082
- print(f"\nStep 6: Exporting final video to '{OUTPUT_VIDEO_FILENAME}'..."); export_success = False
1083
- if final_video: # Only export if we have a final video object
1084
- try:
1085
- final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', audio_codec='aac', fps=fps, preset=preset, threads=4, logger='bar', ffmpeg_params=["-vsync", "vfr"]) # Added vsync param
1086
- print(f"Final video saved successfully as {OUTPUT_VIDEO_FILENAME}")
1087
- export_success = True
1088
- except Exception as e:
1089
- print(f"ERROR: Failed to write final video file: {e}"); import traceback; traceback.print_exc()
1090
- finally:
1091
- # Ensure final video resources are closed after export
1092
- if hasattr(final_video, 'close'): final_video.close()
1093
- print("Closed final video resources.")
1094
- else:
1095
- print("Skipping export as final video generation failed.")
1096
 
 
 
 
1097
 
1098
- print("\nStep 7: Cleaning up temporary files...");
1099
- try: shutil.rmtree(TEMP_FOLDER); print(f"Temporary folder {TEMP_FOLDER} removed.")
1100
- except Exception as e: print(f"Warning: Failed to remove temporary folder {TEMP_FOLDER}: {e}")
1101
 
1102
- end_time = time.time(); total_time = end_time - start_time
1103
- print(f"\n--- Video Generation Finished ---"); print(f"Total time: {total_time:.2f} seconds")
1104
- if export_success: return OUTPUT_VIDEO_FILENAME, f"Video generation complete! Time: {total_time:.2f}s"
1105
- elif not final_video: return None, f"Error: Video generation failed before export. Check logs. Time: {total_time:.2f}s"
1106
- else: return None, f"Error: Video export failed. Check logs. Time: {total_time:.2f}s"
1107
 
 
1108
 
1109
- # ---------------- Gradio Interface Definition ---------------- #
1110
- # (Retained from previous versions)
1111
  VOICE_CHOICES = {
1112
- 'Emma (US Female)': 'af_heart', 'Bella (US Female)': 'af_bella', 'Nicole (US Female)': 'af_nicole',
1113
- 'Sarah (US Female)': 'af_sarah', 'Michael (US Male)': 'am_michael', 'Eric (US Male)': 'am_eric',
1114
- 'Adam (US Male)': 'am_adam', 'Emma (UK Female)': 'bf_emma', 'Alice (UK Female)': 'bf_alice',
1115
- 'George (UK Male)': 'bm_george', 'Daniel (UK Male)': 'bm_daniel',
 
 
 
 
 
 
1116
  }
1117
- def gradio_interface_handler(user_prompt, resolution, captions, bg_music, voice_name, video_prob, music_vol, video_fps, export_preset, tts_speed, caption_size):
1118
- print("\n--- Received Request from Gradio ---")
1119
- print(f"Prompt: {user_prompt[:50]}...")
1120
- print(f"Resolution: {resolution}")
1121
- print(f"Captions: {captions}")
1122
- print(f"Music File: {'Provided' if bg_music else 'None'}")
1123
- print(f"Voice: {voice_name}")
1124
- print(f"Video Probability: {video_prob}%")
1125
- print(f"Music Volume: {music_vol}")
1126
- print(f"FPS: {video_fps}")
1127
- print(f"Preset: {export_preset}")
1128
- print(f"TTS Speed: {tts_speed}")
1129
- print(f"Caption Size: {caption_size}")
1130
 
 
1131
  global selected_voice, voice_speed, font_size, video_clip_probability, bg_music_volume, fps, preset
1132
- selected_voice = VOICE_CHOICES.get(voice_name, 'af_heart')
1133
- voice_speed = tts_speed; font_size = caption_size; video_clip_probability = video_prob / 100.0
1134
- bg_music_volume = music_vol; fps = video_fps; preset = export_preset
1135
- video_path, status_message = generate_full_video(user_prompt, resolution, captions, bg_music)
1136
- print(f"Gradio Handler Status: {status_message}")
1137
- return video_path, status_message
1138
-
1139
- with gr.Blocks(theme=gr.themes.Soft()) as iface:
1140
- gr.Markdown("# 🎬 AI Documentary Video Generator")
1141
- gr.Markdown("Enter a topic or detailed instructions, customize settings, and generate a short documentary-style video.")
1142
- with gr.Row():
1143
- with gr.Column(scale=2):
1144
- prompt_input = gr.Textbox(label="Video Concept / Topic / Script", placeholder="e.g., 'The history of coffee'...", lines=4)
1145
- submit_button = gr.Button("Generate Video", variant="primary")
1146
- status_output = gr.Textbox(label="Status", interactive=False)
1147
- with gr.Column(scale=1): video_output = gr.Video(label="Generated Video")
1148
- with gr.Accordion("⚙️ Advanced Settings", open=False):
1149
- gr.Markdown("### Video & Audio Settings")
1150
- with gr.Row():
1151
- resolution_dd = gr.Dropdown(["Full HD (16:9)", "Short (9:16)"], label="Resolution", value="Full HD (16:9)")
1152
- caption_dd = gr.Radio(["Yes", "No"], label="Generate Captions", value="Yes")
1153
- music_upload = gr.File(label="Upload Background Music (MP3)", file_types=[".mp3"])
1154
- gr.Markdown("### Voice & Narration")
1155
- with gr.Row():
1156
- voice_dd = gr.Dropdown(choices=list(VOICE_CHOICES.keys()), label="Narration Voice", value="Emma (US Female)")
1157
- speed_slider = gr.Slider(0.5, 1.5, value=0.9, step=0.05, label="Voice Speed")
1158
- gr.Markdown("### Visuals & Style")
1159
- with gr.Row():
1160
- video_prob_slider = gr.Slider(0, 100, value=35, step=5, label="Video Clip % (vs. Images)")
1161
- caption_size_slider = gr.Slider(20, 80, value=45, step=1, label="Caption Font Size")
1162
- gr.Markdown("### Export Settings")
1163
- with gr.Row():
1164
- music_vol_slider = gr.Slider(0.0, 1.0, value=0.08, step=0.01, label="Background Music Volume")
1165
- fps_slider = gr.Slider(15, 60, value=30, step=1, label="Video FPS")
1166
- preset_dd = gr.Dropdown(choices=["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow"], value="veryfast", label="Export Quality/Speed Preset")
1167
- submit_button.click(fn=gradio_interface_handler, inputs=[prompt_input, resolution_dd, caption_dd, music_upload, voice_dd, video_prob_slider, music_vol_slider, fps_slider, preset_dd, speed_slider, caption_size_slider], outputs=[video_output, status_output])
 
 
 
 
1168
 
1169
  # Launch the interface
1170
  if __name__ == "__main__":
1171
- print("Launching Gradio Interface...")
1172
- if PEXELS_API_KEY == 'YOUR_PEXELS_API_KEY' or OPENROUTER_API_KEY == 'YOUR_OPENROUTER_API_KEY':
1173
- print("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
1174
- print("!!! WARNING: API Keys not set in the script. !!!")
1175
- print("!!! Please replace 'YOUR_PEXELS_API_KEY' and !!!")
1176
- print("!!! 'YOUR_OPENROUTER_API_KEY' with your actual keys. !!!")
1177
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
1178
- iface.launch(share=True, debug=True) # Share=True for public link, Debug=True for more logs
 
1
+
2
+
3
  # Import necessary libraries
4
+ from kokoro import KPipeline
5
 
6
  import soundfile as sf
7
  import torch
8
+
9
+ import soundfile as sf
10
  import os
11
+ from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip
12
+ from PIL import Image
13
  import tempfile
14
  import random
15
+ import cv2
16
  import math
17
+ import os, requests, io, time, re, random
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  from moviepy.editor import (
19
+ VideoFileClip, concatenate_videoclips, AudioFileClip, ImageClip,
20
+ CompositeVideoClip, TextClip, CompositeAudioClip, concatenate_audioclips  # concatenate_audioclips is needed by add_background_music
21
  )
22
+ import gradio as gr
23
+ import shutil
24
+ import os
25
  import moviepy.video.fx.all as vfx
26
+ import moviepy.config as mpy_config
 
27
  from pydub import AudioSegment
28
  from pydub.generators import Sine
29
 
30
+ from PIL import Image, ImageDraw, ImageFont
31
+ import numpy as np
32
+ from bs4 import BeautifulSoup
33
+ import base64
34
+ from urllib.parse import quote
35
+ import pysrt
36
+ from gtts import gTTS
37
+ import gradio as gr # Import Gradio
38
+
39
+ # Initialize Kokoro TTS pipeline (using American English)
40
+ pipeline = KPipeline(lang_code='a') # Use voice 'af_heart' for American English
41
+ # Ensure ImageMagick binary is set
42
+ mpy_config.change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})
43
+
44
  # ---------------- Global Configuration ---------------- #
45
  PEXELS_API_KEY = 'BhJqbcdm9Vi90KqzXKAhnEHGsuFNv4irXuOjWtT761U49lRzo03qBGna'
46
  OPENROUTER_API_KEY = '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'
47
  OPENROUTER_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free"
48
  OUTPUT_VIDEO_FILENAME = "final_video.mp4"
 
49
  USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
50
 
51
+
52
+
53
+ # Additional global variables needed for the Gradio interface
54
+ selected_voice = 'af_heart' # Default voice
55
+ voice_speed = 0.9 # Default voice speed
56
+ font_size = 45 # Default font size
57
+ video_clip_probability = 0.25 # Default probability for video clips
58
+ bg_music_volume = 0.08 # Default background music volume
59
+ fps = 30 # Default FPS
60
+ preset = "veryfast" # Default preset
 
 
 
61
  TARGET_RESOLUTION = None
62
+ CAPTION_COLOR = None
63
  TEMP_FOLDER = None
 
64
 
 
 
 
 
 
 
 
 
65
 
66
  # ---------------- Helper Functions ---------------- #
67
+ # Helper functions defined below: generate_script, parse_script,
68
+ # search_pexels_videos, search_pexels_images, search_google_images, download_image,
69
+ # download_video, generate_media, generate_tts, apply_kenburns_effect,
70
+ # resize_to_fill, find_mp3_files, add_background_music, create_clip,
71
+ # fix_imagemagick_policy
72
+
73
+ # Define these globally as they were in your original code but will be set per run
74
+ TARGET_RESOLUTION = None
75
+ CAPTION_COLOR = None
76
+ TEMP_FOLDER = None
77
 
78
  def generate_script(user_input):
79
+ """Generate documentary script with proper OpenRouter handling."""
 
80
  headers = {
81
  'Authorization': f'Bearer {OPENROUTER_API_KEY}',
82
+ 'HTTP-Referer': 'https://your-domain.com',
83
+ 'X-Title': 'AI Documentary Maker'
 
84
  }
 
85
 
86
+ prompt = f"""Short Documentary Script GeneratorInstructions:
87
+
88
+ If I say "use this," just output the script exactly as I gave it.
89
+ If I only give topics, generate a script based on them.
90
+ If I provide a full script, rewrite it without changing anything. Make everything short, simple, and humorous, and act serious but funny. Don't say anything off topic. Always end with a funny, on-topic statement asking viewers to subscribe. Use normal conversational text like a real person talking; avoid AI-sounding phrasing and keep the statements human and conversational.
91
+ Always take a negative view and avoid positive statements. Keep in mind the full script is one video about one topic.
92
  Formatting Rules:
93
+
94
+
95
+ Title in Square Brackets:
96
+
97
+
98
+ Each section starts with a one-word title inside [ ] (max two words if necessary).
99
+ This title will be used as a search term for Pexels footage.
100
+
101
+
102
+
103
+ Casual & Funny Narration:
104
+
105
+
106
+ Each section has 5-10 words of narration.
107
+ Keep it natural, funny, and unpredictable (not robotic, poetic, or rhythmic).
108
+
109
+
110
+
111
+ No Special Formatting:
112
+
113
+
114
+ No bold, italics, or special characters. You are an assistant AI whose only task is to create the script. You aren't a chatbot, so don't write any extra text.
115
+
116
+
117
+
118
+ Generalized Search Terms:
119
+
120
+
121
+ If a term is too specific, make it more general for Pexels search.
122
+
123
+
124
+
125
+ Scene-Specific Writing:
126
+
127
+
128
+ Each section describes only what should be shown in the video.
129
+
130
+
131
+
132
+ Output only the script. Make it funny, humorous, and hilarious, and end with a funny subscribe prompt like "subscribe now or ....."
133
+
134
+
135
+ No extra text, just the script.
136
+
137
+
138
+
139
+ Example Output:
140
+ [North Korea]
141
+
142
+ Top 5 unknown facts about North Korea.
143
+
144
+ [Invisibility]
145
+
146
+ North Korea’s internet speed is so fast… it doesn’t exist.
147
+
148
+ [Leadership]
149
+
150
+ Kim Jong-un once won an election with 100% votes… against himself.
151
+
152
+ [Magic]
153
+
154
+ North Korea discovered time travel. That’s why their news is always from the past.
155
+
156
+ [Warning]
157
+
158
+ Subscribe now, or Kim Jong-un will send you a free one-way ticket… to North Korea.
159
+
160
+ [Freedom]
161
+
162
+ North Korean citizens can do anything… as long as it's government-approved.
163
+ Now here is the Topic/script: {user_input}
164
  """
165
+
166
  data = {
167
  'model': OPENROUTER_MODEL,
168
  'messages': [{'role': 'user', 'content': prompt}],
169
+ 'temperature': 0.4,
170
+ 'max_tokens': 5000
171
  }
172
+
173
  try:
174
  response = requests.post(
175
  'https://openrouter.ai/api/v1/chat/completions',
176
  headers=headers,
177
  json=data,
178
+ timeout=30
179
  )
180
+
181
+ if response.status_code == 200:
182
+ response_data = response.json()
183
+ if 'choices' in response_data and len(response_data['choices']) > 0:
184
+ return response_data['choices'][0]['message']['content']
185
+ else:
186
+ print("Unexpected response format:", response_data)
187
+ return None
188
  else:
189
+ print(f"API Error {response.status_code}: {response.text}")
190
  return None
191
+
 
 
 
 
 
 
 
 
192
  except Exception as e:
193
+ print(f"Request failed: {str(e)}")
194
  return None
195
 
196
  def parse_script(script_text):
197
+ """
198
+ Parse the generated script into a list of elements.
199
+ For each section, create two elements:
200
+ - A 'media' element using the section title as the visual prompt.
201
+ - A 'tts' element with the narration text, voice info, and computed duration.
202
+ """
203
+ sections = {}
204
  current_title = None
205
+ current_text = ""
206
+
207
+ try:
208
+ for line in script_text.splitlines():
209
+ line = line.strip()
210
+ if line.startswith("[") and "]" in line:
211
+ bracket_start = line.find("[")
212
+ bracket_end = line.find("]", bracket_start)
213
+ if bracket_start != -1 and bracket_end != -1:
214
+ if current_title is not None:
215
+ sections[current_title] = current_text.strip()
216
+ current_title = line[bracket_start+1:bracket_end]
217
+ current_text = line[bracket_end+1:].strip()
218
+ elif current_title:
219
+ current_text += line + " "
220
+
221
+ if current_title:
222
+ sections[current_title] = current_text.strip()
223
+
224
+ elements = []
225
+ for title, narration in sections.items():
226
+ if not title or not narration:
227
+ continue
228
+
229
+ media_element = {"type": "media", "prompt": title, "effects": "fade-in"}
230
+ words = narration.split()
231
+ duration = max(3, len(words) * 0.5)
232
+ tts_element = {"type": "tts", "text": narration, "voice": "en", "duration": duration}
233
+ elements.append(media_element)
234
+ elements.append(tts_element)
235
+
236
+ return elements
237
+ except Exception as e:
238
+ print(f"Error parsing script: {e}")
239
  return []
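
For reference, a quick sketch of what parse_script yields for a two-line section (values follow the rules above; illustrative only):

sample = "[North Korea]\nTop 5 unknown facts about North Korea."
elements = parse_script(sample)
# elements ->
# [{'type': 'media', 'prompt': 'North Korea', 'effects': 'fade-in'},
#  {'type': 'tts', 'text': 'Top 5 unknown facts about North Korea.',
#   'voice': 'en', 'duration': 3.5}]   # 7 words * 0.5 s, with a 3 s floor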
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
 
241
  def search_pexels_videos(query, pexels_api_key):
242
  """Search for a video on Pexels by query and return a random HD video."""
 
 
 
243
  headers = {'Authorization': pexels_api_key}
244
  base_url = "https://api.pexels.com/videos/search"
245
  num_pages = 3
  videos_per_page = 15  # results requested per page (assumed value; the name is used below but its definition is not visible in this view)
 
247
 
248
  max_retries = 3
249
  retry_delay = 1
 
250
 
251
  search_query = query
252
  all_videos = []
 
253
 
254
  for page in range(1, num_pages + 1):
 
255
  for attempt in range(max_retries):
256
  try:
257
+ params = {"query": search_query, "per_page": videos_per_page, "page": page}
258
+ response = requests.get(base_url, headers=headers, params=params, timeout=10)
259
 
260
  if response.status_code == 200:
261
  data = response.json()
262
  videos = data.get("videos", [])
263
 
264
  if not videos:
265
+ print(f"No videos found on page {page}.")
266
+ break
 
 
267
 
 
268
  for video in videos:
269
  video_files = video.get("video_files", [])
270
+ for file in video_files:
271
+ if file.get("quality") == "hd":
272
+ all_videos.append(file.get("link"))
273
+ break
274
+
275
+ break
276
+
 
 
 
 
 
 
 
 
 
 
277
  elif response.status_code == 429:
278
+ print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
279
  time.sleep(retry_delay)
280
  retry_delay *= 2
 
 
 
 
281
  else:
282
+ print(f"Error fetching videos: {response.status_code} {response.text}")
283
  if attempt < max_retries - 1:
284
+ print(f"Retrying in {retry_delay} seconds...")
285
  time.sleep(retry_delay)
286
  retry_delay *= 2
287
  else:
288
+ break
 
289
 
 
 
 
 
290
  except requests.exceptions.RequestException as e:
291
+ print(f"Request exception: {e}")
292
  if attempt < max_retries - 1:
293
+ print(f"Retrying in {retry_delay} seconds...")
294
  time.sleep(retry_delay)
295
  retry_delay *= 2
296
  else:
297
+ break
 
 
 
 
298
 
299
  if all_videos:
300
  random_video = random.choice(all_videos)
301
+ print(f"Selected random video from {len(all_videos)} HD videos")
302
  return random_video
303
  else:
304
+ print("No suitable videos found after searching all pages.")
305
  return None
306
 
307
  def search_pexels_images(query, pexels_api_key):
308
  """Search for an image on Pexels by query."""
 
 
 
309
  headers = {'Authorization': pexels_api_key}
310
  url = "https://api.pexels.com/v1/search"
311
+ params = {"query": query, "per_page": 5, "orientation": "landscape"}
 
312
 
313
  max_retries = 3
314
  retry_delay = 1
 
 
 
315
 
316
  for attempt in range(max_retries):
317
  try:
318
+ response = requests.get(url, headers=headers, params=params, timeout=10)
319
 
320
  if response.status_code == 200:
321
  data = response.json()
322
  photos = data.get("photos", [])
323
  if photos:
324
+ photo = random.choice(photos[:min(5, len(photos))])
325
+ img_url = photo.get("src", {}).get("original")
326
+ return img_url
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
327
  else:
328
  print(f"No images found for query: {query}")
329
+ return None
330
 
 
 
 
331
  elif response.status_code == 429:
332
+ print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
333
  time.sleep(retry_delay)
334
  retry_delay *= 2
 
 
 
 
335
  else:
336
+ print(f"Error fetching images: {response.status_code} {response.text}")
337
  if attempt < max_retries - 1:
338
+ print(f"Retrying in {retry_delay} seconds...")
339
  time.sleep(retry_delay)
340
  retry_delay *= 2
 
 
341
 
 
 
 
 
342
  except requests.exceptions.RequestException as e:
343
+ print(f"Request exception: {e}")
344
  if attempt < max_retries - 1:
345
+ print(f"Retrying in {retry_delay} seconds...")
346
  time.sleep(retry_delay)
347
  retry_delay *= 2
 
 
348
 
349
  print(f"No Pexels images found for query: {query} after all attempts")
350
  return None
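
For orientation, the slice of the Pexels /v1/search response that this helper relies on looks roughly like this (abridged example; only the fields actually read are shown):

# response.json() (abridged):
# {
#   "photos": [
#     {"id": 12345,
#      "src": {"original": "https://images.pexels.com/photos/12345/example.jpeg",
#              "large": "...", "medium": "..."}}
#   ]
# }
# search_pexels_images() picks a random photo from the first five and returns photo["src"]["original"].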
351
 
 
352
  def search_google_images(query):
353
+ """Search for images on Google Images (for news-related queries)"""
 
354
  try:
355
+ search_url = f"https://www.google.com/search?q={quote(query)}&tbm=isch"
356
  headers = {"User-Agent": USER_AGENT}
357
  response = requests.get(search_url, headers=headers, timeout=10)
 
358
  soup = BeautifulSoup(response.text, "html.parser")
359
 
360
  img_tags = soup.find_all("img")
361
  image_urls = []
362
  for img in img_tags:
363
+ src = img.get("src", "")
364
+ if src.startswith("http") and "gstatic" not in src:
365
+ image_urls.append(src)
 
366
 
367
  if image_urls:
368
+ return random.choice(image_urls[:5]) if len(image_urls) >= 5 else image_urls[0]
 
 
369
  else:
370
+ print(f"No Google Images found for query: {query}")
371
  return None
 
 
 
372
  except Exception as e:
373
+ print(f"Error in Google Images search: {e}")
374
  return None
375
 
 
376
  def download_image(image_url, filename):
377
  """Download an image from a URL to a local file with enhanced error handling."""
378
  try:
379
  headers = {"User-Agent": USER_AGENT}
380
  print(f"Downloading image from: {image_url} to {filename}")
381
+ response = requests.get(image_url, headers=headers, stream=True, timeout=15)
382
  response.raise_for_status()
383
 
 
 
 
384
  with open(filename, 'wb') as f:
385
  for chunk in response.iter_content(chunk_size=8192):
386
  f.write(chunk)
387
 
388
+ print(f"Image downloaded successfully to: {filename}")
389
 
 
390
  try:
391
  img = Image.open(filename)
392
+ img.verify()
 
 
 
393
  img = Image.open(filename)
394
  if img.mode != 'RGB':
395
+ img = img.convert('RGB')
396
+ img.save(filename)
397
+ print(f"Image validated and processed: {filename}")
398
+ return filename
399
+ except Exception as e_validate:
400
+ print(f"Downloaded file is not a valid image: {e_validate}")
401
+ if os.path.exists(filename):
402
+ os.remove(filename)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
403
  return None
 
 
 
 
 
404
 
405
  except requests.exceptions.RequestException as e_download:
406
+ print(f"Image download error: {e_download}")
407
+ if os.path.exists(filename):
408
+ os.remove(filename)
409
  return None
410
+ except Exception as e_general:
411
+ print(f"General error during image processing: {e_general}")
412
+ if os.path.exists(filename):
413
+ os.remove(filename)
414
  return None
415
 
416
  def download_video(video_url, filename):
417
  """Download a video from a URL to a local file."""
418
  try:
419
+ response = requests.get(video_url, stream=True, timeout=30)
 
 
420
  response.raise_for_status()
 
 
 
 
421
  with open(filename, 'wb') as f:
422
+ for chunk in response.iter_content(chunk_size=8192):
423
  f.write(chunk)
424
+ print(f"Video downloaded successfully to: {filename}")
425
+ return filename
426
+ except Exception as e:
 
 
 
 
 
 
427
  print(f"Video download error: {e}")
428
+ if os.path.exists(filename):
429
+ os.remove(filename)
430
  return None
 
 
 
431
 
432
  def generate_media(prompt, user_image=None, current_index=0, total_segments=1):
433
  """
434
+ Generate a visual asset for the given prompt. News-related prompts use Google Images;
435
+ otherwise try a Pexels video (with probability video_clip_probability), then fall back to Pexels images.
436
+ Returns a dict: {'path': <file_path>, 'asset_type': 'video' or 'image'}.
437
  """
438
+ safe_prompt = re.sub(r'[^\w\s-]', '', prompt).strip().replace(' ', '_')
 
 
 
 
439
 
 
440
  if "news" in prompt.lower():
441
+ print(f"News-related query detected: {prompt}. Using Google Images...")
442
  image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_news.jpg")
443
+ image_url = search_google_images(prompt)
444
  if image_url:
445
  downloaded_image = download_image(image_url, image_file)
446
  if downloaded_image:
447
+ print(f"News image saved to {downloaded_image}")
448
  return {"path": downloaded_image, "asset_type": "image"}
 
 
449
  else:
450
+ print(f"Google Images search failed for prompt: {prompt}")
 
451
 
 
452
  if random.random() < video_clip_probability:
 
453
  video_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_video.mp4")
454
  video_url = search_pexels_videos(prompt, PEXELS_API_KEY)
455
  if video_url:
456
  downloaded_video = download_video(video_url, video_file)
457
  if downloaded_video:
458
+ print(f"Video asset saved to {downloaded_video}")
459
  return {"path": downloaded_video, "asset_type": "video"}
 
 
460
  else:
461
+ print(f"Pexels video search failed for prompt: {prompt}")
 
 
462
 
463
+ image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}.jpg")
 
 
 
464
  image_url = search_pexels_images(prompt, PEXELS_API_KEY)
465
  if image_url:
466
  downloaded_image = download_image(image_url, image_file)
467
  if downloaded_image:
468
+ print(f"Image asset saved to {downloaded_image}")
469
  return {"path": downloaded_image, "asset_type": "image"}
470
  else:
471
+ print(f"Pexels image download failed for prompt: {prompt}")
472
+
473
+ fallback_terms = ["nature", "people", "landscape", "technology", "business"]
474
+ for term in fallback_terms:
475
+ print(f"Trying fallback image search with term: {term}")
476
+ fallback_file = os.path.join(TEMP_FOLDER, f"fallback_{term}.jpg")
 
 
 
 
 
477
  fallback_url = search_pexels_images(term, PEXELS_API_KEY)
478
  if fallback_url:
479
  downloaded_fallback = download_image(fallback_url, fallback_file)
480
  if downloaded_fallback:
481
+ print(f"Fallback image saved to {downloaded_fallback}")
482
  return {"path": downloaded_fallback, "asset_type": "image"}
483
  else:
484
+ print(f"Fallback image download failed for term: {term}")
485
  else:
486
+ print(f"Fallback image search failed for term: {term}")
 
 
 
487
 
488
+ print(f"Failed to generate visual asset for prompt: {prompt}")
 
489
  return None
490
 
491
+ def generate_silent_audio(duration, sample_rate=24000):
492
+ """Generate a silent WAV audio file lasting 'duration' seconds."""
493
+ num_samples = int(duration * sample_rate)
494
+ silence = np.zeros(num_samples, dtype=np.float32)
495
+ silent_path = os.path.join(TEMP_FOLDER, f"silent_{int(time.time())}.wav")
496
+ sf.write(silent_path, silence, sample_rate)
497
+ print(f"Silent audio generated: {silent_path}")
498
+ return silent_path
499
 
500
+ def generate_tts(text, voice):
501
+ """
502
+ Generate TTS audio using Kokoro, falling back to gTTS or silent audio if needed.
503
+ """
504
+ safe_text = re.sub(r'[^\w\s-]', '', text[:10]).strip().replace(' ', '_')
505
+ file_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text}.wav")
506
+
507
+ if os.path.exists(file_path):
508
+ print(f"Using cached TTS for text '{text[:10]}...'")
509
+ return file_path
510
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
511
  try:
512
+ kokoro_voice = selected_voice if voice == 'en' else voice
513
+ generator = pipeline(text, voice=kokoro_voice, speed=voice_speed, split_pattern=r'\n+')
514
+ audio_segments = []
515
+ for i, (gs, ps, audio) in enumerate(generator):
516
+ audio_segments.append(audio)
517
+ full_audio = np.concatenate(audio_segments) if len(audio_segments) > 1 else audio_segments[0]
518
+ sf.write(file_path, full_audio, 24000)
519
+ print(f"TTS audio saved to {file_path} (Kokoro)")
520
+ return file_path
521
+ except Exception as e:
522
+ print(f"Error with Kokoro TTS: {e}")
523
+ try:
524
+ print("Falling back to gTTS...")
525
+ tts = gTTS(text=text, lang='en')
526
+ mp3_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text}.mp3")
527
+ tts.save(mp3_path)
528
+ audio = AudioSegment.from_mp3(mp3_path)
529
+ audio.export(file_path, format="wav")
530
+ os.remove(mp3_path)
531
+ print(f"Fallback TTS saved to {file_path} (gTTS)")
532
+ return file_path
533
+ except Exception as fallback_error:
534
+ print(f"Both TTS methods failed: {fallback_error}")
535
+ return generate_silent_audio(duration=max(3, len(text.split()) * 0.5))
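
A minimal usage sketch of the TTS helper (illustrative; assumes TEMP_FOLDER exists and the globals above are set):

# Passing 'en' makes generate_tts use the globally selected Kokoro voice,
# falling back to gTTS and finally to silent audio if synthesis fails.
wav_path = generate_tts("North Korea's internet speed is unreal.", "en")
narration = AudioFileClip(wav_path)
print(f"Narration length: {narration.duration:.2f}s")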
536
+
537
+ def apply_kenburns_effect(clip, target_resolution, effect_type=None):
538
+ """Apply a smooth Ken Burns effect with a single movement pattern."""
539
+ target_w, target_h = target_resolution
540
+ clip_aspect = clip.w / clip.h
541
+ target_aspect = target_w / target_h
542
 
543
+ if clip_aspect > target_aspect:
544
+ new_height = target_h
545
+ new_width = int(new_height * clip_aspect)
546
+ else:
547
+ new_width = target_w
548
+ new_height = int(new_width / clip_aspect)
549
+
550
+ clip = clip.resize(newsize=(new_width, new_height))
551
+ base_scale = 1.15
552
+ new_width = int(new_width * base_scale)
553
+ new_height = int(new_height * base_scale)
554
+ clip = clip.resize(newsize=(new_width, new_height))
555
+
556
+ max_offset_x = new_width - target_w
557
+ max_offset_y = new_height - target_h
558
+
559
+ available_effects = ["zoom-in", "zoom-out", "pan-left", "pan-right", "up-left"]
560
+ if effect_type is None or effect_type == "random":
561
+ effect_type = random.choice(available_effects)
562
+
563
+ if effect_type == "zoom-in":
564
+ start_zoom = 0.9
565
+ end_zoom = 1.1
566
+ start_center = (new_width / 2, new_height / 2)
567
+ end_center = start_center
568
+ elif effect_type == "zoom-out":
569
+ start_zoom = 1.1
570
+ end_zoom = 0.9
571
+ start_center = (new_width / 2, new_height / 2)
572
+ end_center = start_center
573
+ elif effect_type == "pan-left":
574
+ start_zoom = 1.0
575
+ end_zoom = 1.0
576
+ start_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
577
+ end_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
578
+ elif effect_type == "pan-right":
579
+ start_zoom = 1.0
580
+ end_zoom = 1.0
581
+ start_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
582
+ end_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
583
+ elif effect_type == "up-left":
584
+ start_zoom = 1.0
585
+ end_zoom = 1.0
586
+ start_center = (max_offset_x + target_w / 2, max_offset_y + target_h / 2)
587
+ end_center = (target_w / 2, target_h / 2)
588
+ else:
589
+ raise ValueError(f"Unsupported effect_type: {effect_type}")
590
+
591
+ def transform_frame(get_frame, t):
592
+ frame = get_frame(t)
593
+ ratio = t / clip.duration if clip.duration > 0 else 0
594
+ ratio = 0.5 - 0.5 * math.cos(math.pi * ratio)
595
+ current_zoom = start_zoom + (end_zoom - start_zoom) * ratio
596
+ crop_w = int(target_w / current_zoom)
597
+ crop_h = int(target_h / current_zoom)
598
+ current_center_x = start_center[0] + (end_center[0] - start_center[0]) * ratio
599
+ current_center_y = start_center[1] + (end_center[1] - start_center[1]) * ratio
600
+ min_center_x = crop_w / 2
601
+ max_center_x = new_width - crop_w / 2
602
+ min_center_y = crop_h / 2
603
+ max_center_y = new_height - crop_h / 2
604
+ current_center_x = max(min_center_x, min(current_center_x, max_center_x))
605
+ current_center_y = max(min_center_y, min(current_center_y, max_center_y))
606
+ cropped_frame = cv2.getRectSubPix(frame, (crop_w, crop_h), (current_center_x, current_center_y))
607
+ resized_frame = cv2.resize(cropped_frame, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
608
+ return resized_frame
609
+
610
+ return clip.fl(transform_frame)
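
The 0.5 - 0.5 * cos(pi * ratio) line above is a cosine ease-in/ease-out, so the camera move starts and ends slowly. A quick check of the mapping (illustrative):

import math
for r in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(r, round(0.5 - 0.5 * math.cos(math.pi * r), 3))
# 0.0 -> 0.0, 0.25 -> 0.146, 0.5 -> 0.5, 0.75 -> 0.854, 1.0 -> 1.0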
611
+
612
+ def resize_to_fill(clip, target_resolution):
613
+ """Resize and crop a clip to fill the target resolution while maintaining aspect ratio."""
614
  target_w, target_h = target_resolution
615
+ clip_aspect = clip.w / clip.h
 
 
 
 
 
 
 
 
 
 
616
  target_aspect = target_w / target_h
617
 
618
+ if clip_aspect > target_aspect:
619
+ clip = clip.resize(height=target_h)
620
+ crop_amount = (clip.w - target_w) / 2
621
+ clip = clip.crop(x1=crop_amount, x2=clip.w - crop_amount, y1=0, y2=clip.h)
622
  else:
623
+ clip = clip.resize(width=target_w)
624
+ crop_amount = (clip.h - target_h) / 2
625
+ clip = clip.crop(x1=0, x2=clip.w, y1=crop_amount, y2=clip.h - crop_amount)
626
+
627
+ return clip
628
+
629
+ def find_mp3_files():
630
+ """Search for any MP3 files in the current directory and subdirectories."""
631
+ mp3_files = []
632
+ for root, dirs, files in os.walk('.'):
633
+ for file in files:
634
+ if file.endswith('.mp3'):
635
+ mp3_path = os.path.join(root, file)
636
+ mp3_files.append(mp3_path)
637
+ print(f"Found MP3 file: {mp3_path}")
638
+ return mp3_files[0] if mp3_files else None
639
+
640
+ def add_background_music(final_video, bg_music_volume=0.10):
641
+ """Add background music to the final video using any MP3 file found."""
642
+ try:
643
+ bg_music_path = "music.mp3"
644
+ if bg_music_path and os.path.exists(bg_music_path):
645
+ print(f"Adding background music from: {bg_music_path}")
646
+ bg_music = AudioFileClip(bg_music_path)
647
+ if bg_music.duration < final_video.duration:
648
+ loops_needed = math.ceil(final_video.duration / bg_music.duration)
649
+ bg_segments = [bg_music] * loops_needed
650
+ bg_music = concatenate_audioclips(bg_segments)
651
+ bg_music = bg_music.subclip(0, final_video.duration)
652
+ bg_music = bg_music.volumex(bg_music_volume)
653
+ video_audio = final_video.audio
654
+ mixed_audio = CompositeAudioClip([video_audio, bg_music])
655
+ final_video = final_video.set_audio(mixed_audio)
656
+ print("Background music added successfully")
657
+ else:
658
+ print("No MP3 files found, skipping background music")
659
+ return final_video
660
+ except Exception as e:
661
+ print(f"Error adding background music: {e}")
662
+ print("Continuing without background music")
663
+ return final_video
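
add_background_music looks for music.mp3 in the working directory (the Gradio handler copies the upload there). A direct-call sketch with hypothetical file names:

# Hypothetical: remix an already-rendered video with quieter music.
video = VideoFileClip("final_video.mp4")                  # assumes this file exists
mixed = add_background_music(video, bg_music_volume=0.05)
mixed.write_videofile("final_with_music.mp4", codec="libx264", fps=30, preset="veryfast")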
664
+
665
+ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
666
+ """Create a video clip with synchronized subtitles and narration."""
667
+ try:
668
+ print(f"Creating clip #{segment_index} with asset_type: {asset_type}, media_path: {media_path}")
669
+ if not os.path.exists(media_path) or not os.path.exists(tts_path):
670
+ print("Missing media or TTS file")
671
+ return None
672
 
673
+ audio_clip = AudioFileClip(tts_path).audio_fadeout(0.2)
674
+ audio_duration = audio_clip.duration
675
+ target_duration = audio_duration + 0.2
676
+
677
+ if asset_type == "video":
678
+ clip = VideoFileClip(media_path)
679
+ clip = resize_to_fill(clip, TARGET_RESOLUTION)
680
+ if clip.duration < target_duration:
681
+ clip = clip.loop(duration=target_duration)
682
+ else:
683
+ clip = clip.subclip(0, target_duration)
684
+ elif asset_type == "image":
685
+ img = Image.open(media_path)
686
+ if img.mode != 'RGB':
687
+ with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp:
688
+ img.convert('RGB').save(temp.name)
689
+ media_path = temp.name
690
+ img.close()
691
+ clip = ImageClip(media_path).set_duration(target_duration)
692
+ clip = apply_kenburns_effect(clip, TARGET_RESOLUTION)
693
+ clip = clip.fadein(0.3).fadeout(0.3)
694
+ else:
695
+ return None
696
+
697
+ if narration_text and CAPTION_COLOR != "transparent":
698
+ try:
699
+ words = narration_text.split()
700
+ chunks = []
701
+ current_chunk = []
702
+ for word in words:
703
+ current_chunk.append(word)
704
+ if len(current_chunk) >= 5:
705
+ chunks.append(' '.join(current_chunk))
706
+ current_chunk = []
707
+ if current_chunk:
708
+ chunks.append(' '.join(current_chunk))
709
+
710
+ chunk_duration = audio_duration / len(chunks)
711
+ subtitle_clips = []
712
+ subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)
713
 
714
+ for i, chunk_text in enumerate(chunks):
715
+ start_time = i * chunk_duration
716
+ end_time = (i + 1) * chunk_duration
717
+ txt_clip = TextClip(
718
+ chunk_text,
719
+ fontsize=font_size,  # use the configurable caption size instead of a hard-coded 45
720
+ font='Arial-Bold',
721
+ color=CAPTION_COLOR,
722
+ bg_color='rgba(0, 0, 0, 0.25)',
723
+ method='caption',
724
+ align='center',
725
+ stroke_width=2,
726
+ stroke_color=CAPTION_COLOR,
727
+ size=(TARGET_RESOLUTION[0] * 0.8, None)
728
+ ).set_start(start_time).set_end(end_time)
729
+ txt_clip = txt_clip.set_position(('center', subtitle_y_position))
730
+ subtitle_clips.append(txt_clip)
731
+
732
+ clip = CompositeVideoClip([clip] + subtitle_clips)
733
+ except Exception as sub_error:
734
+ print(f"Subtitle error: {sub_error}")
735
+ txt_clip = TextClip(
736
+ narration_text,
737
+ fontsize=font_size,
738
+ color=CAPTION_COLOR,
739
+ align='center',
740
+ size=(TARGET_RESOLUTION[0] * 0.7, None)
741
+ ).set_position(('center', int(TARGET_RESOLUTION[1] / 3))).set_duration(clip.duration)
742
+ clip = CompositeVideoClip([clip, txt_clip])
743
+
744
+ clip = clip.set_audio(audio_clip)
745
+ print(f"Clip created: {clip.duration:.1f}s")
746
+ return clip
747
+ except Exception as e:
748
+ print(f"Error in create_clip: {str(e)}")
749
+ return None
750
+
751
+ def fix_imagemagick_policy():
752
+ """Fix ImageMagick security policies."""
753
  try:
754
+ print("Attempting to fix ImageMagick security policies...")
755
+ policy_paths = [
756
+ "/etc/ImageMagick-6/policy.xml",
757
+ "/etc/ImageMagick-7/policy.xml",
758
+ "/etc/ImageMagick/policy.xml",
759
+ "/usr/local/etc/ImageMagick-7/policy.xml"
760
+ ]
761
+ found_policy = next((path for path in policy_paths if os.path.exists(path)), None)
762
+ if not found_policy:
763
+ print("No policy.xml found. Using alternative subtitle method.")
764
+ return False
765
+ print(f"Modifying policy file at {found_policy}")
766
+ os.system(f"sudo cp {found_policy} {found_policy}.bak")
767
+ os.system(f"sudo sed -i 's/rights=\"none\"/rights=\"read|write\"/g' {found_policy}")
768
+ os.system(f"sudo sed -i 's/<policy domain=\"path\" pattern=\"@\*\"[^>]*>/<policy domain=\"path\" pattern=\"@*\" rights=\"read|write\"/g' {found_policy}")
769
+ os.system(f"sudo sed -i 's/<policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"[^>]*>/<!-- <policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"> -->/g' {found_policy}")
770
+ print("ImageMagick policies updated successfully.")
771
+ return True
772
  except Exception as e:
773
+ print(f"Error fixing policies: {e}")
774
+ return False
775
+
776
+
777
+
778
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
800
 
801
 
802
  # ---------------- Main Video Generation Function ---------------- #
803
+ def generate_video(user_input, resolution, caption_option):
804
+ """Generate a video based on user input via Gradio."""
805
+ global TARGET_RESOLUTION, CAPTION_COLOR, TEMP_FOLDER
806
+
807
+ # Set resolution
808
+ if resolution == "Full":
809
+ TARGET_RESOLUTION = (1920, 1080)
810
+ elif resolution == "Short":
811
+ TARGET_RESOLUTION = (1080, 1920)
812
+ else:
813
+ TARGET_RESOLUTION = (1920, 1080) # Default
814
 
815
+ # Set caption color
816
+ CAPTION_COLOR = "white" if caption_option == "Yes" else "transparent"
817
+
818
+ # Create a unique temporary folder
819
+ TEMP_FOLDER = tempfile.mkdtemp()
820
+
821
+ # Fix ImageMagick policy
822
+ fix_success = fix_imagemagick_policy()
823
+ if not fix_success:
824
+ print("Will use alternative methods if needed")
825
+
826
+ print("Generating script from API...")
827
+ script = generate_script(user_input)
828
+ if not script:
829
+ print("Failed to generate script.")
830
+ shutil.rmtree(TEMP_FOLDER)
831
+ return None
832
+ print("Generated Script:\n", script)
833
+ elements = parse_script(script)
834
+ if not elements:
835
+ print("Failed to parse script into elements.")
836
+ shutil.rmtree(TEMP_FOLDER)
837
+ return None
838
+ print(f"Parsed {len(elements)//2} script segments.")
839
+
840
+ paired_elements = []
841
+ for i in range(0, len(elements), 2):
842
+ if i + 1 < len(elements):
843
+ paired_elements.append((elements[i], elements[i + 1]))
844
+
845
+ if not paired_elements:
846
+ print("No valid script segments found.")
847
+ shutil.rmtree(TEMP_FOLDER)
848
+ return None
 
 
 
 
849
 
850
+ clips = []
851
+ for idx, (media_elem, tts_elem) in enumerate(paired_elements):
852
+ print(f"\nProcessing segment {idx+1}/{len(paired_elements)} with prompt: '{media_elem['prompt']}'")
853
+ media_asset = generate_media(media_elem['prompt'], current_index=idx, total_segments=len(paired_elements))
854
+ if not media_asset:
855
+ print(f"Skipping segment {idx+1} due to missing media asset.")
856
+ continue
857
+ tts_path = generate_tts(tts_elem['text'], tts_elem['voice'])
858
  if not tts_path:
859
+ print(f"Skipping segment {idx+1} due to TTS generation failure.")
 
 
 
860
  continue
861
+ clip = create_clip(
862
+ media_path=media_asset['path'],
863
+ asset_type=media_asset['asset_type'],
864
+ tts_path=tts_path,
865
+ duration=tts_elem['duration'],
866
+ effects=media_elem.get('effects', 'fade-in'),
867
+ narration_text=tts_elem['text'],
868
+ segment_index=idx
869
+ )
870
  if clip:
871
+ clips.append(clip)
872
  else:
873
+ print(f"Clip creation failed for segment {idx+1}.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
874
 
875
+ if not clips:
876
+ print("No clips were successfully created.")
877
+ shutil.rmtree(TEMP_FOLDER)
878
+ return None
 
 
 
 
 
 
 
 
 
 
879
 
880
+ print("\nConcatenating clips...")
881
+ final_video = concatenate_videoclips(clips, method="compose")
882
+ final_video = add_background_music(final_video, bg_music_volume=bg_music_volume)
883
 
884
+ print(f"Exporting final video to {OUTPUT_VIDEO_FILENAME}...")
885
+ final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', fps=fps, preset=preset)
886
+ print(f"Final video saved as {OUTPUT_VIDEO_FILENAME}")
887
 
888
+ # Clean up
889
+ print("Cleaning up temporary files...")
890
+ shutil.rmtree(TEMP_FOLDER)
891
+ print("Temporary files removed.")
 
892
 
893
+ return OUTPUT_VIDEO_FILENAME
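
generate_video can also be called without the Gradio UI; a minimal headless sketch (topic and settings are examples, and the module-level API keys must be valid):

output = generate_video(
    user_input="Top 5 facts about coffee",   # example topic
    resolution="Short",                      # 1080x1920 vertical video
    caption_option="Yes",                    # burn in white captions
)
print("Video written to:", output)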
894
 
895
+ # ---------------- Gradio Interface ---------------- #
 
896
  VOICE_CHOICES = {
897
+ 'Emma (Female)': 'af_heart',
898
+ 'Bella (Female)': 'af_bella',
899
+ 'Nicole (Female)': 'af_nicole',
900
+ 'Aoede (Female)': 'af_aoede',
901
+ 'Kore (Female)': 'af_kore',
902
+ 'Sarah (Female)': 'af_sarah',
903
+ 'Nova (Female)': 'af_nova',
904
+ 'Sky (Female)': 'af_sky',
905
+ 'Alloy (Female)': 'af_alloy',
906
+ 'Jessica (Female)': 'af_jessica',
907
+ 'River (Female)': 'af_river',
908
+ 'Michael (Male)': 'am_michael',
909
+ 'Fenrir (Male)': 'am_fenrir',
910
+ 'Puck (Male)': 'am_puck',
911
+ 'Echo (Male)': 'am_echo',
912
+ 'Eric (Male)': 'am_eric',
913
+ 'Liam (Male)': 'am_liam',
914
+ 'Onyx (Male)': 'am_onyx',
915
+ 'Santa (Male)': 'am_santa',
916
+ 'Adam (Male)': 'am_adam',
917
+ 'Emma πŸ‡¬πŸ‡§ (Female)': 'bf_emma',
918
+ 'Isabella πŸ‡¬πŸ‡§ (Female)': 'bf_isabella',
919
+ 'Alice πŸ‡¬πŸ‡§ (Female)': 'bf_alice',
920
+ 'Lily πŸ‡¬πŸ‡§ (Female)': 'bf_lily',
921
+ 'George πŸ‡¬πŸ‡§ (Male)': 'bm_george',
922
+ 'Fable πŸ‡¬πŸ‡§ (Male)': 'bm_fable',
923
+ 'Lewis πŸ‡¬πŸ‡§ (Male)': 'bm_lewis',
924
+ 'Daniel πŸ‡¬πŸ‡§ (Male)': 'bm_daniel'
925
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
926
 
927
+ def generate_video_with_options(user_input, resolution, caption_option, music_file, voice, vclip_prob, bg_vol, video_fps, video_preset, v_speed, caption_size):
928
  global selected_voice, voice_speed, font_size, video_clip_probability, bg_music_volume, fps, preset
929
+
930
+ # Update global variables with user selections
931
+ selected_voice = VOICE_CHOICES[voice]
932
+ voice_speed = v_speed
933
+ font_size = caption_size
934
+ video_clip_probability = vclip_prob / 100 # Convert from percentage to decimal
935
+ bg_music_volume = bg_vol
936
+ fps = video_fps
937
+ preset = video_preset
938
+
939
+ # Handle music upload
940
+ if music_file is not None:
941
+ target_path = "music.mp3"
942
+ shutil.copy(music_file.name, target_path)
943
+ print(f"Uploaded music saved as: {target_path}")
944
+
945
+ # Generate the video
946
+ return generate_video(user_input, resolution, caption_option)
947
+
948
+ # Create the Gradio interface
949
+ iface = gr.Interface(
950
+ fn=generate_video_with_options,
951
+ inputs=[
952
+ gr.Textbox(label="Video Concept", placeholder="Enter your video concept here..."),
953
+ gr.Radio(["Full", "Short"], label="Resolution", value="Full"),
954
+ gr.Radio(["Yes", "No"], label="Captions", value="Yes"),
955
+ gr.File(label="Upload Background Music (MP3)", file_types=[".mp3"]),
956
+ gr.Dropdown(choices=list(VOICE_CHOICES.keys()), label="Choose Voice", value="Emma (Female)"),
957
+ gr.Slider(0, 100, value=25, step=1, label="Video Clip Usage Probability (%)"),
958
+ gr.Slider(0.0, 1.0, value=0.08, step=0.01, label="Background Music Volume"),
959
+ gr.Slider(10, 60, value=30, step=1, label="Video FPS"),
960
+ gr.Dropdown(choices=["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow"],
961
+ value="veryfast", label="Export Preset"),
962
+ gr.Slider(0.5, 1.5, value=0.9, step=0.05, label="Voice Speed"),
963
+ gr.Slider(20, 100, value=45, step=1, label="Caption Font Size")
964
+ ],
965
+ outputs=gr.Video(label="Generated Video"),
966
+ title="AI Documentary Video Generator",
967
+ description="Create short documentary videos with AI. Upload music, choose voice, and customize settings."
968
+ )
969
 
970
  # Launch the interface
971
  if __name__ == "__main__":
972
+ iface.launch(share=True)