testdeep123 committed on
Commit
878d3d4
Β·
verified Β·
1 Parent(s): 0ed73f6

Upload 2 files

Browse files
Files changed (2) hide show
  1. app (5).py +939 -0
  2. requirements (2).txt +12 -0
app (5).py ADDED
@@ -0,0 +1,939 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ # Import necessary libraries
4
+ from kokoro import KPipeline
5
+
6
+ import soundfile as sf
7
+ import torch
8
+
9
+ import soundfile as sf
10
+ import os
11
+ from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip
12
+ from PIL import Image
13
+ import tempfile
14
+ import random
15
+ import cv2
16
+ import math
17
+ import os, requests, io, time, re, random
18
+ from moviepy.editor import (
19
+ VideoFileClip, concatenate_videoclips, AudioFileClip, ImageClip,
20
+ CompositeVideoClip, TextClip, CompositeAudioClip
21
+ )
22
+ import gradio as gr
23
+ import shutil
24
+ import os
25
+ import moviepy.video.fx.all as vfx
26
+ import moviepy.config as mpy_config
27
+ from pydub import AudioSegment
28
+ from pydub.generators import Sine
29
+
30
+ from PIL import Image, ImageDraw, ImageFont
31
+ import numpy as np
32
+ from bs4 import BeautifulSoup
33
+ import base64
34
+ from urllib.parse import quote
35
+ import pysrt
36
+ from gtts import gTTS
37
+ import gradio as gr # Import Gradio
38
+
39
+ # Initialize Kokoro TTS pipeline (using American English)
40
+ pipeline = KPipeline(lang_code='a') # Use voice 'af_heart' for American English
41
+ # Ensure ImageMagick binary is set
42
+ mpy_config.change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})
43
+
44
# ---------------- Global Configuration ---------------- #
# SECURITY FIX: credentials are read from the environment instead of being
# hard-coded.  The previous revision committed live Pexels/OpenRouter keys
# to a public repository; those keys must be rotated.
PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY", "")
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
OPENROUTER_MODEL = "google/gemini-2.0-flash-exp:free"
OUTPUT_VIDEO_FILENAME = "final_video.mp4"
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"

# Tunables surfaced through the Gradio interface / rendering pipeline.
selected_voice = 'af_heart'      # Default Kokoro voice
voice_speed = 0.9                # Default voice speed multiplier
font_size = 45                   # Default subtitle font size
video_clip_probability = 0.25    # Chance a segment uses a video clip instead of a still
bg_music_volume = 0.08           # Default background music volume
fps = 30                         # Default output frames per second
preset = "veryfast"              # Default libx264 encoder preset

# Set per run by generate_video(); None until a render starts.
TARGET_RESOLUTION = None
CAPTION_COLOR = None
TEMP_FOLDER = None
77
+
78
def generate_script(user_input):
    """Generate documentary script with proper OpenRouter handling.

    Sends the topic to the OpenRouter chat-completions endpoint and returns
    the model's script text, or None on any API/network failure.
    """
    headers = {
        'Authorization': f'Bearer {OPENROUTER_API_KEY}',
        # NOTE(review): placeholder referer; OpenRouter uses it for app attribution.
        'HTTP-Referer': 'https://your-domain.com',
        'X-Title': 'AI Documentary Maker'
    }

    # Prompt template: instructs the model to emit [Tag]-structured narration
    # that parse_script() can later split into media/tts element pairs.
    prompt = f"""You're a professional documentary narrator. Your job is to write a serious, natural, and informative video script based on one topic.

The script should sound like a real human voiceover from a TV show or documentary β€” clear, factual, and engaging, like something you'd hear on National Geographic or a news report.

Structure:
- Break the script into scenes using [Tags]. Each tag is a short title (1–2 words) that describes the visual or idea.
- Under each tag, write one sentence (max 12 words) that fits the tag and continues the topic.
- The full script should make sense as one connected narration β€” no randomness.
- Use natural, formal English. No slang, no fake AI language, and no robotic tone.
- Do not use humor, sarcasm, or casual language. This is a serious narration.
- No emotion-sound words like β€œaww,” β€œeww,” β€œwhoa,” etc.
- Do not use numbers like 1, 2, 3 β€” write them out as one, two, three.
- At the end, add a [Subscribe] tag with a formal or respectful reason to follow or subscribe.

Only output the script. No extra comments or text.

Example:

[Ocean]

The ocean covers over seventy percent of the Earth's surface.

[Currents]

Ocean currents distribute heat and regulate global climate patterns.

[Coral Reefs]

These ecosystems support over one million species of marine life.

[Pollution]

Plastic waste threatens marine biodiversity and food chains.

[Climate Impact]

Rising temperatures are causing coral bleaching and habitat loss.

[Subscribe]

Follow to explore more about the changing planet we live on.



Now here is the Topic/scrip: {user_input}
"""

    data = {
        'model': OPENROUTER_MODEL,
        'messages': [{'role': 'user', 'content': prompt}],
        'temperature': 0.4,  # low temperature keeps the narration factual/consistent
        'max_tokens': 5000
    }

    try:
        response = requests.post(
            'https://openrouter.ai/api/v1/chat/completions',
            headers=headers,
            json=data,
            timeout=30
        )

        if response.status_code == 200:
            response_data = response.json()
            # Defensive: only trust well-formed chat-completion payloads.
            if 'choices' in response_data and len(response_data['choices']) > 0:
                return response_data['choices'][0]['message']['content']
            else:
                print("Unexpected response format:", response_data)
                return None
        else:
            print(f"API Error {response.status_code}: {response.text}")
            return None

    except Exception as e:
        print(f"Request failed: {str(e)}")
        return None
162
+
163
def parse_script(script_text):
    """
    Convert a generated script into an ordered list of pipeline elements.

    Every [Title] section yields two entries:
      * a 'media' element whose search prompt is the section title, and
      * a 'tts' element carrying the narration text, a voice id, and an
        estimated duration (0.5 s per word, 3 s minimum).
    Returns an empty list when parsing fails.
    """
    try:
        titled_sections = {}
        active_title = None
        buffered_text = ""

        for raw_line in script_text.splitlines():
            raw_line = raw_line.strip()
            if raw_line.startswith("[") and "]" in raw_line:
                open_idx = raw_line.find("[")
                close_idx = raw_line.find("]", open_idx)
                if open_idx != -1 and close_idx != -1:
                    # Flush the previous section before starting a new one.
                    if active_title is not None:
                        titled_sections[active_title] = buffered_text.strip()
                    active_title = raw_line[open_idx + 1:close_idx]
                    buffered_text = raw_line[close_idx + 1:].strip()
            elif active_title:
                buffered_text += raw_line + " "

        if active_title:
            titled_sections[active_title] = buffered_text.strip()

        parsed = []
        for section_title, narration in titled_sections.items():
            if not section_title or not narration:
                continue
            word_count = len(narration.split())
            parsed.append({"type": "media", "prompt": section_title, "effects": "fade-in"})
            parsed.append({
                "type": "tts",
                "text": narration,
                "voice": "en",
                "duration": max(3, word_count * 0.5),
            })
        return parsed
    except Exception as e:
        print(f"Error parsing script: {e}")
        return []
207
+
208
def search_pexels_videos(query, pexels_api_key):
    """Search for a video on Pexels by query and return a random HD video.

    Scans up to 3 result pages, collecting the first HD-quality file link of
    every video found, then returns one link at random (or None if nothing
    suitable was found).  Each page request is retried up to 3 times with
    exponential backoff on rate-limit (429) and transient errors.
    """
    headers = {'Authorization': pexels_api_key}
    base_url = "https://api.pexels.com/videos/search"
    num_pages = 3
    videos_per_page = 15

    max_retries = 3
    retry_delay = 1  # NOTE(review): not reset per page, so backoff carries across pages

    search_query = query
    all_videos = []

    for page in range(1, num_pages + 1):
        for attempt in range(max_retries):
            try:
                params = {"query": search_query, "per_page": videos_per_page, "page": page}
                response = requests.get(base_url, headers=headers, params=params, timeout=10)

                if response.status_code == 200:
                    data = response.json()
                    videos = data.get("videos", [])

                    if not videos:
                        print(f"No videos found on page {page}.")
                        break

                    for video in videos:
                        video_files = video.get("video_files", [])
                        for file in video_files:
                            # Keep only the first HD rendition of each video.
                            if file.get("quality") == "hd":
                                all_videos.append(file.get("link"))
                                break

                    break  # page fetched successfully; move on to the next page

                elif response.status_code == 429:
                    # Rate limited: back off exponentially and retry this page.
                    print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    print(f"Error fetching videos: {response.status_code} {response.text}")
                    if attempt < max_retries - 1:
                        print(f"Retrying in {retry_delay} seconds...")
                        time.sleep(retry_delay)
                        retry_delay *= 2
                    else:
                        break

            except requests.exceptions.RequestException as e:
                # Network-level failure (timeout, DNS, connection reset).
                print(f"Request exception: {e}")
                if attempt < max_retries - 1:
                    print(f"Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    break

    if all_videos:
        random_video = random.choice(all_videos)
        print(f"Selected random video from {len(all_videos)} HD videos")
        return random_video
    else:
        print("No suitable videos found after searching all pages.")
        return None
273
+
274
def search_pexels_images(query, pexels_api_key):
    """Search Pexels for a landscape photo matching `query`.

    Returns the original-size URL of one randomly chosen result among the
    first five, or None when nothing is found / all retries fail.
    """
    request_headers = {'Authorization': pexels_api_key}
    endpoint = "https://api.pexels.com/v1/search"
    query_params = {"query": query, "per_page": 5, "orientation": "landscape"}

    max_retries = 3
    backoff = 1  # seconds; doubled after every failed attempt

    for attempt in range(max_retries):
        try:
            response = requests.get(endpoint, headers=request_headers, params=query_params, timeout=10)
        except requests.exceptions.RequestException as e:
            print(f"Request exception: {e}")
            if attempt < max_retries - 1:
                print(f"Retrying in {backoff} seconds...")
                time.sleep(backoff)
                backoff *= 2
            continue

        if response.status_code == 200:
            photos = response.json().get("photos", [])
            if not photos:
                print(f"No images found for query: {query}")
                return None
            # Pick randomly among (at most) the first five results.
            chosen = random.choice(photos[:min(5, len(photos))])
            return chosen.get("src", {}).get("original")

        if response.status_code == 429:
            print(f"Rate limit hit (attempt {attempt+1}/{max_retries}). Retrying in {backoff} seconds...")
            time.sleep(backoff)
            backoff *= 2
        else:
            print(f"Error fetching images: {response.status_code} {response.text}")
            if attempt < max_retries - 1:
                print(f"Retrying in {backoff} seconds...")
                time.sleep(backoff)
                backoff *= 2

    print(f"No Pexels images found for query: {query} after all attempts")
    return None
318
+
319
def search_google_images(query):
    """Scrape Google Images results and return one candidate image URL (or None)."""
    try:
        url = f"https://www.google.com/search?q={quote(query)}&tbm=isch"
        page = requests.get(url, headers={"User-Agent": USER_AGENT}, timeout=10)
        parsed = BeautifulSoup(page.text, "html.parser")

        # Collect externally hosted thumbnails; skip Google's own gstatic assets.
        candidates = []
        for tag in parsed.find_all("img"):
            src = tag.get("src", "")
            if src.startswith("http") and "gstatic" not in src:
                candidates.append(src)

        if not candidates:
            print(f"No Google Images found for query: {query}")
            return None
        # Prefer a random pick among the first five hits when enough exist.
        return random.choice(candidates[:5]) if len(candidates) >= 5 else candidates[0]
    except Exception as e:
        print(f"Error in Google Images search: {e}")
        return None
342
+
343
def download_image(image_url, filename):
    """Download an image from a URL to a local file with enhanced error handling.

    The response is streamed to `filename`, then validated with Pillow and
    converted to RGB when necessary.  On any failure the partial file is
    removed and None is returned; on success the file path is returned.

    BUG FIX: the log messages previously printed the literal "(unknown)"
    instead of the destination path; they now interpolate `filename`.
    """
    try:
        headers = {"User-Agent": USER_AGENT}
        print(f"Downloading image from: {image_url} to {filename}")
        response = requests.get(image_url, headers=headers, stream=True, timeout=15)
        response.raise_for_status()

        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        print(f"Image downloaded successfully to: {filename}")

        try:
            # verify() consumes the handle, so the image is reopened for the
            # actual conversion/save step.
            img = Image.open(filename)
            img.verify()
            img = Image.open(filename)
            if img.mode != 'RGB':
                img = img.convert('RGB')
                img.save(filename)
            print(f"Image validated and processed: {filename}")
            return filename
        except Exception as e_validate:
            print(f"Downloaded file is not a valid image: {e_validate}")
            if os.path.exists(filename):
                os.remove(filename)
            return None

    except requests.exceptions.RequestException as e_download:
        print(f"Image download error: {e_download}")
        if os.path.exists(filename):
            os.remove(filename)
        return None
    except Exception as e_general:
        print(f"General error during image processing: {e_general}")
        if os.path.exists(filename):
            os.remove(filename)
        return None
382
+
383
def download_video(video_url, filename):
    """Download a video from a URL to `filename`; return the path or None.

    BUG FIX: the success log previously printed the literal "(unknown)"
    instead of the destination path; it now interpolates `filename`.
    """
    try:
        response = requests.get(video_url, stream=True, timeout=30)
        response.raise_for_status()
        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        print(f"Video downloaded successfully to: {filename}")
        return filename
    except Exception as e:
        print(f"Video download error: {e}")
        # Remove any partially written file so callers never see a corrupt asset.
        if os.path.exists(filename):
            os.remove(filename)
        return None
398
+
399
def generate_media(prompt, user_image=None, current_index=0, total_segments=1):
    """
    Generate a visual asset by first searching for a video or using a specific search strategy.
    For news-related queries, use Google Images.
    Returns a dict: {'path': <file_path>, 'asset_type': 'video' or 'image'},
    or None when every strategy (including generic fallbacks) fails.
    """
    # Filename-safe version of the prompt (strip punctuation, spaces -> underscores).
    safe_prompt = re.sub(r'[^\w\s-]', '', prompt).strip().replace(' ', '_')

    # Strategy 1: news topics -> Google Images (more topical than stock photos).
    if "news" in prompt.lower():
        print(f"News-related query detected: {prompt}. Using Google Images...")
        image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_news.jpg")
        image_url = search_google_images(prompt)
        if image_url:
            downloaded_image = download_image(image_url, image_file)
            if downloaded_image:
                print(f"News image saved to {downloaded_image}")
                return {"path": downloaded_image, "asset_type": "image"}
        else:
            print(f"Google Images search failed for prompt: {prompt}")

    # Strategy 2: with probability video_clip_probability, try a Pexels video clip.
    if random.random() < video_clip_probability:
        video_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_video.mp4")
        video_url = search_pexels_videos(prompt, PEXELS_API_KEY)
        if video_url:
            downloaded_video = download_video(video_url, video_file)
            if downloaded_video:
                print(f"Video asset saved to {downloaded_video}")
                return {"path": downloaded_video, "asset_type": "video"}
        else:
            print(f"Pexels video search failed for prompt: {prompt}")

    # Strategy 3: Pexels still image for the prompt itself.
    image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}.jpg")
    image_url = search_pexels_images(prompt, PEXELS_API_KEY)
    if image_url:
        downloaded_image = download_image(image_url, image_file)
        if downloaded_image:
            print(f"Image asset saved to {downloaded_image}")
            return {"path": downloaded_image, "asset_type": "image"}
    else:
        print(f"Pexels image download failed for prompt: {prompt}")

    # Strategy 4: generic fallback searches so the segment still gets a visual.
    fallback_terms = ["nature", "people", "landscape", "technology", "business"]
    for term in fallback_terms:
        print(f"Trying fallback image search with term: {term}")
        fallback_file = os.path.join(TEMP_FOLDER, f"fallback_{term}.jpg")
        fallback_url = search_pexels_images(term, PEXELS_API_KEY)
        if fallback_url:
            downloaded_fallback = download_image(fallback_url, fallback_file)
            if downloaded_fallback:
                print(f"Fallback image saved to {downloaded_fallback}")
                return {"path": downloaded_fallback, "asset_type": "image"}
            else:
                print(f"Fallback image download failed for term: {term}")
        else:
            print(f"Fallback image search failed for term: {term}")

    print(f"Failed to generate visual asset for prompt: {prompt}")
    return None
457
+
458
def generate_silent_audio(duration, sample_rate=24000):
    """Write `duration` seconds of silence to a WAV file and return its path."""
    sample_count = int(duration * sample_rate)
    silent_samples = np.zeros(sample_count, dtype=np.float32)
    # Timestamp in the name keeps successive silent files from colliding.
    out_path = os.path.join(TEMP_FOLDER, f"silent_{int(time.time())}.wav")
    sf.write(out_path, silent_samples, sample_rate)
    print(f"Silent audio generated: {out_path}")
    return out_path
466
+
467
def generate_tts(text, voice):
    """
    Generate TTS audio using Kokoro, falling back to gTTS or silent audio if needed.

    Results are cached inside TEMP_FOLDER.  BUG FIX: the cache key is now a
    hash of the full narration text; the previous scheme keyed on only the
    first ten characters, so two narrations sharing a prefix reused the
    wrong cached audio.
    """
    import hashlib  # local import: only needed for the cache key

    text_digest = hashlib.md5(text.encode('utf-8')).hexdigest()[:16]
    file_path = os.path.join(TEMP_FOLDER, f"tts_{text_digest}.wav")

    if os.path.exists(file_path):
        print(f"Using cached TTS for text '{text[:10]}...'")
        return file_path

    try:
        # 'en' maps to the UI-selected Kokoro voice; anything else is passed through.
        kokoro_voice = selected_voice if voice == 'en' else voice
        generator = pipeline(text, voice=kokoro_voice, speed=voice_speed, split_pattern=r'\n+')
        audio_segments = []
        for i, (gs, ps, audio) in enumerate(generator):
            audio_segments.append(audio)
        full_audio = np.concatenate(audio_segments) if len(audio_segments) > 1 else audio_segments[0]
        sf.write(file_path, full_audio, 24000)
        print(f"TTS audio saved to {file_path} (Kokoro)")
        return file_path
    except Exception as e:
        print(f"Error with Kokoro TTS: {e}")
        try:
            print("Falling back to gTTS...")
            tts = gTTS(text=text, lang='en')
            mp3_path = os.path.join(TEMP_FOLDER, f"tts_{text_digest}.mp3")
            tts.save(mp3_path)
            audio = AudioSegment.from_mp3(mp3_path)
            audio.export(file_path, format="wav")
            os.remove(mp3_path)
            print(f"Fallback TTS saved to {file_path} (gTTS)")
            return file_path
        except Exception as fallback_error:
            print(f"Both TTS methods failed: {fallback_error}")
            # Last resort: silence matching the estimated narration length,
            # so downstream clip assembly still has an audio track.
            return generate_silent_audio(duration=max(3, len(text.split()) * 0.5))
503
+
504
def apply_kenburns_effect(clip, target_resolution, effect_type=None):
    """Apply a smooth Ken Burns effect with a single movement pattern.

    The clip is first scaled to cover the target resolution, then enlarged
    by 15% to leave headroom for the camera move.  A per-frame transform
    crops a zooming/moving window and scales it back to the target size.

    Raises ValueError for an unknown effect_type.
    """
    target_w, target_h = target_resolution
    clip_aspect = clip.w / clip.h
    target_aspect = target_w / target_h

    # Scale so the frame fully covers the target (no letterboxing).
    if clip_aspect > target_aspect:
        new_height = target_h
        new_width = int(new_height * clip_aspect)
    else:
        new_width = target_w
        new_height = int(new_width / clip_aspect)

    clip = clip.resize(newsize=(new_width, new_height))
    base_scale = 1.15  # 15% overscan gives the virtual camera room to move
    new_width = int(new_width * base_scale)
    new_height = int(new_height * base_scale)
    clip = clip.resize(newsize=(new_width, new_height))

    # How far the crop window can travel inside the enlarged frame.
    max_offset_x = new_width - target_w
    max_offset_y = new_height - target_h

    available_effects = ["zoom-in", "zoom-out", "pan-left", "pan-right", "up-left"]
    if effect_type is None or effect_type == "random":
        effect_type = random.choice(available_effects)

    # Each effect is described by start/end zoom factors and start/end crop
    # centres; frames interpolate between the two states.
    if effect_type == "zoom-in":
        start_zoom = 0.9
        end_zoom = 1.1
        start_center = (new_width / 2, new_height / 2)
        end_center = start_center
    elif effect_type == "zoom-out":
        start_zoom = 1.1
        end_zoom = 0.9
        start_center = (new_width / 2, new_height / 2)
        end_center = start_center
    elif effect_type == "pan-left":
        start_zoom = 1.0
        end_zoom = 1.0
        start_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
        end_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
    elif effect_type == "pan-right":
        start_zoom = 1.0
        end_zoom = 1.0
        start_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
        end_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
    elif effect_type == "up-left":
        start_zoom = 1.0
        end_zoom = 1.0
        start_center = (max_offset_x + target_w / 2, max_offset_y + target_h / 2)
        end_center = (target_w / 2, target_h / 2)
    else:
        raise ValueError(f"Unsupported effect_type: {effect_type}")

    def transform_frame(get_frame, t):
        # Per-frame transform: crop a window whose zoom and centre follow a
        # cosine-eased path (zero velocity at both endpoints).
        frame = get_frame(t)
        ratio = t / clip.duration if clip.duration > 0 else 0
        ratio = 0.5 - 0.5 * math.cos(math.pi * ratio)
        current_zoom = start_zoom + (end_zoom - start_zoom) * ratio
        crop_w = int(target_w / current_zoom)
        crop_h = int(target_h / current_zoom)
        current_center_x = start_center[0] + (end_center[0] - start_center[0]) * ratio
        current_center_y = start_center[1] + (end_center[1] - start_center[1]) * ratio
        # Clamp the window so the crop never leaves the enlarged frame.
        min_center_x = crop_w / 2
        max_center_x = new_width - crop_w / 2
        min_center_y = crop_h / 2
        max_center_y = new_height - crop_h / 2
        current_center_x = max(min_center_x, min(current_center_x, max_center_x))
        current_center_y = max(min_center_y, min(current_center_y, max_center_y))
        cropped_frame = cv2.getRectSubPix(frame, (crop_w, crop_h), (current_center_x, current_center_y))
        resized_frame = cv2.resize(cropped_frame, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
        return resized_frame

    return clip.fl(transform_frame)
578
+
579
def resize_to_fill(clip, target_resolution):
    """Scale then center-crop `clip` so it exactly fills `target_resolution`."""
    target_w, target_h = target_resolution
    source_aspect = clip.w / clip.h
    wanted_aspect = target_w / target_h

    if source_aspect > wanted_aspect:
        # Wider than target: match height, trim equal slices off both sides.
        clip = clip.resize(height=target_h)
        excess = (clip.w - target_w) / 2
        clip = clip.crop(x1=excess, x2=clip.w - excess, y1=0, y2=clip.h)
    else:
        # Taller than target: match width, trim equal slices off top/bottom.
        clip = clip.resize(width=target_w)
        excess = (clip.h - target_h) / 2
        clip = clip.crop(x1=0, x2=clip.w, y1=excess, y2=clip.h - excess)

    return clip
595
+
596
def find_mp3_files():
    """Walk the working directory tree; return the first MP3 path found, or None."""
    discovered = []
    for folder, _dirs, entries in os.walk('.'):
        for entry in entries:
            if entry.endswith('.mp3'):
                full_path = os.path.join(folder, entry)
                discovered.append(full_path)
                print(f"Found MP3 file: {full_path}")
    return discovered[0] if discovered else None
606
+
607
def add_background_music(final_video, bg_music_volume=0.10):
    """Mix looping background music from ./music.mp3 under the video's narration.

    Returns the (possibly unchanged) video clip; any failure is logged and
    the original video is returned so rendering can continue without music.
    """
    try:
        # BUG FIX: concatenate_audioclips was previously undefined here (the
        # top-level import list only brings in concatenate_videoclips), so a
        # NameError was raised whenever the track was shorter than the video.
        from moviepy.editor import concatenate_audioclips

        bg_music_path = "music.mp3"
        if bg_music_path and os.path.exists(bg_music_path):
            print(f"Adding background music from: {bg_music_path}")
            bg_music = AudioFileClip(bg_music_path)
            if bg_music.duration < final_video.duration:
                # Loop the track enough times to span the whole video.
                loops_needed = math.ceil(final_video.duration / bg_music.duration)
                bg_segments = [bg_music] * loops_needed
                bg_music = concatenate_audioclips(bg_segments)
            bg_music = bg_music.subclip(0, final_video.duration)
            bg_music = bg_music.volumex(bg_music_volume)
            video_audio = final_video.audio
            mixed_audio = CompositeAudioClip([video_audio, bg_music])
            final_video = final_video.set_audio(mixed_audio)
            print("Background music added successfully")
        else:
            print("No MP3 files found, skipping background music")
        return final_video
    except Exception as e:
        print(f"Error adding background music: {e}")
        print("Continuing without background music")
        return final_video
631
+
632
def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
    """Create a video clip with synchronized subtitles and narration.

    Args:
        media_path: Path to the image or video asset.
        asset_type: "video" or "image"; anything else returns None.
        tts_path: Path to the narration WAV file.
        duration: Unused; kept for interface compatibility.
        effects: Unused; kept for interface compatibility.
        narration_text: Caption text; rendered unless CAPTION_COLOR is "transparent".
        segment_index: Index used only for logging.

    Returns:
        A moviepy clip with audio attached, or None on failure.
    """
    try:
        print(f"Creating clip #{segment_index} with asset_type: {asset_type}, media_path: {media_path}")
        if not os.path.exists(media_path) or not os.path.exists(tts_path):
            print("Missing media or TTS file")
            return None

        audio_clip = AudioFileClip(tts_path).audio_fadeout(0.2)
        audio_duration = audio_clip.duration
        # Slight padding so the visual outlasts the narration fade-out.
        target_duration = audio_duration + 0.2

        if asset_type == "video":
            clip = VideoFileClip(media_path)
            clip = resize_to_fill(clip, TARGET_RESOLUTION)
            if clip.duration < target_duration:
                clip = clip.loop(duration=target_duration)
            else:
                clip = clip.subclip(0, target_duration)
        elif asset_type == "image":
            img = Image.open(media_path)
            if img.mode != 'RGB':
                # Convert non-RGB stills (e.g. palette PNGs) via a temp JPEG.
                with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp:
                    img.convert('RGB').save(temp.name)
                    media_path = temp.name
            img.close()
            clip = ImageClip(media_path).set_duration(target_duration)
            clip = apply_kenburns_effect(clip, TARGET_RESOLUTION)
            clip = clip.fadein(0.3).fadeout(0.3)
        else:
            return None

        if narration_text and CAPTION_COLOR != "transparent":
            try:
                # Group the narration into 5-word caption chunks.
                words = narration_text.split()
                chunks = []
                current_chunk = []
                for word in words:
                    current_chunk.append(word)
                    if len(current_chunk) >= 5:
                        chunks.append(' '.join(current_chunk))
                        current_chunk = []
                if current_chunk:
                    chunks.append(' '.join(current_chunk))

                # BUG FIX: guard against whitespace-only narration, which
                # previously caused ZeroDivisionError (zero chunks).
                chunk_duration = audio_duration / max(1, len(chunks))
                subtitle_clips = []
                subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)

                for i, chunk_text in enumerate(chunks):
                    start_time = i * chunk_duration
                    end_time = (i + 1) * chunk_duration
                    txt_clip = TextClip(
                        chunk_text,
                        # CONSISTENCY FIX: honour the global font_size setting
                        # (was hard-coded 45 while the fallback path below
                        # already used font_size; default value is unchanged).
                        fontsize=font_size,
                        font='Arial-Bold',
                        color=CAPTION_COLOR,
                        bg_color='rgba(0, 0, 0, 0.25)',
                        method='caption',
                        align='center',
                        stroke_width=2,
                        stroke_color=CAPTION_COLOR,
                        size=(TARGET_RESOLUTION[0] * 0.8, None)
                    ).set_start(start_time).set_end(end_time)
                    txt_clip = txt_clip.set_position(('center', subtitle_y_position))
                    subtitle_clips.append(txt_clip)

                clip = CompositeVideoClip([clip] + subtitle_clips)
            except Exception as sub_error:
                # Fallback: render the whole narration as one static caption.
                print(f"Subtitle error: {sub_error}")
                txt_clip = TextClip(
                    narration_text,
                    fontsize=font_size,
                    color=CAPTION_COLOR,
                    align='center',
                    size=(TARGET_RESOLUTION[0] * 0.7, None)
                ).set_position(('center', int(TARGET_RESOLUTION[1] / 3))).set_duration(clip.duration)
                clip = CompositeVideoClip([clip, txt_clip])

        clip = clip.set_audio(audio_clip)
        print(f"Clip created: {clip.duration:.1f}s")
        return clip
    except Exception as e:
        print(f"Error in create_clip: {str(e)}")
        return None
717
+
718
def fix_imagemagick_policy():
    """Fix ImageMagick security policies so TextClip can render captions.

    ImageMagick ships with a restrictive policy.xml that blocks operations
    moviepy's TextClip relies on.  This rewrites the first policy file found
    to grant read|write rights.  Returns True on success, False when no
    policy file exists or editing failed.

    NOTE(review): shells out with sudo via os.system; this only works where
    passwordless sudo is available (e.g. containers/Spaces images).
    """
    try:
        print("Attempting to fix ImageMagick security policies...")
        # Common policy.xml locations across ImageMagick 6/7 installs.
        policy_paths = [
            "/etc/ImageMagick-6/policy.xml",
            "/etc/ImageMagick-7/policy.xml",
            "/etc/ImageMagick/policy.xml",
            "/usr/local/etc/ImageMagick-7/policy.xml"
        ]
        found_policy = next((path for path in policy_paths if os.path.exists(path)), None)
        if not found_policy:
            print("No policy.xml found. Using alternative subtitle method.")
            return False
        print(f"Modifying policy file at {found_policy}")
        # Back up the original, then relax rights and re-enable the path/PDF
        # coder policies in place with sed.
        os.system(f"sudo cp {found_policy} {found_policy}.bak")
        os.system(f"sudo sed -i 's/rights=\"none\"/rights=\"read|write\"/g' {found_policy}")
        os.system(f"sudo sed -i 's/<policy domain=\"path\" pattern=\"@\*\"[^>]*>/<policy domain=\"path\" pattern=\"@*\" rights=\"read|write\"/g' {found_policy}")
        os.system(f"sudo sed -i 's/<policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"[^>]*>/<!-- <policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"> -->/g' {found_policy}")
        print("ImageMagick policies updated successfully.")
        return True
    except Exception as e:
        print(f"Error fixing policies: {e}")
        return False
742
+
743
+
744
+
745
+
746
+
747
+
748
+
749
+
750
+
751
+
752
+
753
+
754
+
755
+
756
+
757
+
758
+
759
+
760
+
761
+
762
+
763
+
764
+
765
+
766
+
767
+
768
+
769
# ---------------- Main Video Generation Function ---------------- #
def generate_video(user_input, resolution, caption_option):
    """Generate a documentary-style video from a user concept.

    Pipeline: generate a script, parse it into (media prompt, narration)
    pairs, build one clip per pair (media + TTS + captions), concatenate,
    mix in background music, and export.

    Args:
        user_input: Free-text video concept fed to the script generator.
        resolution: "Full" -> 1920x1080, "Short" -> 1080x1920;
            anything else falls back to 1920x1080.
        caption_option: "Yes" renders white captions; any other value
            makes them transparent (effectively hidden).

    Returns:
        Path to the exported video file, or None on failure.
    """
    global TARGET_RESOLUTION, CAPTION_COLOR, TEMP_FOLDER

    # Set resolution (landscape by default).
    if resolution == "Full":
        TARGET_RESOLUTION = (1920, 1080)
    elif resolution == "Short":
        TARGET_RESOLUTION = (1080, 1920)
    else:
        TARGET_RESOLUTION = (1920, 1080)  # Default

    # Set caption color
    CAPTION_COLOR = "white" if caption_option == "Yes" else "transparent"

    # Unique working directory for this run's intermediate assets.
    TEMP_FOLDER = tempfile.mkdtemp()

    # Best-effort ImageMagick policy fix; caption rendering may still
    # work through alternative code paths if it fails.
    if not fix_imagemagick_policy():
        print("Will use alternative methods if needed")

    # NOTE(fix): the original called shutil.rmtree(TEMP_FOLDER) on every
    # early-return path but leaked the folder whenever a helper raised.
    # try/finally guarantees cleanup on all exits.
    try:
        print("Generating script from API...")
        script = generate_script(user_input)
        if not script:
            print("Failed to generate script.")
            return None
        print("Generated Script:\n", script)

        elements = parse_script(script)
        if not elements:
            print("Failed to parse script into elements.")
            return None
        print(f"Parsed {len(elements)//2} script segments.")

        # Elements alternate media-prompt / narration; pair them up,
        # dropping a trailing unpaired element if present.
        paired_elements = [
            (elements[i], elements[i + 1])
            for i in range(0, len(elements) - 1, 2)
        ]

        if not paired_elements:
            print("No valid script segments found.")
            return None

        clips = []
        for idx, (media_elem, tts_elem) in enumerate(paired_elements):
            print(f"\nProcessing segment {idx+1}/{len(paired_elements)} with prompt: '{media_elem['prompt']}'")
            media_asset = generate_media(media_elem['prompt'], current_index=idx, total_segments=len(paired_elements))
            if not media_asset:
                print(f"Skipping segment {idx+1} due to missing media asset.")
                continue
            tts_path = generate_tts(tts_elem['text'], tts_elem['voice'])
            if not tts_path:
                print(f"Skipping segment {idx+1} due to TTS generation failure.")
                continue
            clip = create_clip(
                media_path=media_asset['path'],
                asset_type=media_asset['asset_type'],
                tts_path=tts_path,
                duration=tts_elem['duration'],
                effects=media_elem.get('effects', 'fade-in'),
                narration_text=tts_elem['text'],
                segment_index=idx
            )
            if clip:
                clips.append(clip)
            else:
                print(f"Clip creation failed for segment {idx+1}.")

        if not clips:
            print("No clips were successfully created.")
            return None

        print("\nConcatenating clips...")
        final_video = concatenate_videoclips(clips, method="compose")
        final_video = add_background_music(final_video, bg_music_volume=bg_music_volume)

        print(f"Exporting final video to {OUTPUT_VIDEO_FILENAME}...")
        final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', fps=fps, preset=preset)
        print(f"Final video saved as {OUTPUT_VIDEO_FILENAME}")

        return OUTPUT_VIDEO_FILENAME
    finally:
        # Clean up intermediates even on failure or exception;
        # ignore_errors avoids masking the real error with a cleanup error.
        print("Cleaning up temporary files...")
        shutil.rmtree(TEMP_FOLDER, ignore_errors=True)
        print("Temporary files removed.")
861
+
862
# ---------------- Gradio Interface ---------------- #
# UI display name -> Kokoro TTS voice id.
# Id prefixes: 'af_'/'am_' = American female/male,
#              'bf_'/'bm_' = British female/male.
VOICE_CHOICES = {
    'Emma (Female)': 'af_heart',
    'Bella (Female)': 'af_bella',
    'Nicole (Female)': 'af_nicole',
    'Aoede (Female)': 'af_aoede',
    'Kore (Female)': 'af_kore',
    'Sarah (Female)': 'af_sarah',
    'Nova (Female)': 'af_nova',
    'Sky (Female)': 'af_sky',
    'Alloy (Female)': 'af_alloy',
    'Jessica (Female)': 'af_jessica',
    'River (Female)': 'af_river',
    'Michael (Male)': 'am_michael',
    'Fenrir (Male)': 'am_fenrir',
    'Puck (Male)': 'am_puck',
    'Echo (Male)': 'am_echo',
    'Eric (Male)': 'am_eric',
    'Liam (Male)': 'am_liam',
    'Onyx (Male)': 'am_onyx',
    'Santa (Male)': 'am_santa',
    'Adam (Male)': 'am_adam',
    'Emma πŸ‡¬πŸ‡§ (Female)': 'bf_emma',
    'Isabella πŸ‡¬πŸ‡§ (Female)': 'bf_isabella',
    'Alice πŸ‡¬πŸ‡§ (Female)': 'bf_alice',
    'Lily πŸ‡¬πŸ‡§ (Female)': 'bf_lily',
    'George πŸ‡¬πŸ‡§ (Male)': 'bm_george',
    'Fable πŸ‡¬πŸ‡§ (Male)': 'bm_fable',
    'Lewis πŸ‡¬πŸ‡§ (Male)': 'bm_lewis',
    'Daniel πŸ‡¬πŸ‡§ (Male)': 'bm_daniel'
}
893
+
894
def generate_video_with_options(user_input, resolution, caption_option, music_file, voice, vclip_prob, bg_vol, video_fps, video_preset, v_speed, caption_size):
    """Gradio entry point: apply UI settings, stage music, and render.

    Copies the per-run UI selections into the module-level configuration
    globals consumed by the generation pipeline, saves any uploaded
    background music to the path the pipeline expects, then delegates to
    generate_video.

    Args:
        user_input: Video concept text.
        resolution: "Full" or "Short".
        caption_option: "Yes" to burn in captions.
        music_file: Optional uploaded MP3 (Gradio file object or filepath
            string, depending on Gradio version), or None.
        voice: Display name; must be a key of VOICE_CHOICES.
        vclip_prob: Video-clip usage probability in percent (0-100).
        bg_vol: Background music volume (0.0-1.0).
        video_fps: Export frame rate.
        video_preset: libx264 encoding preset name.
        v_speed: TTS voice speed multiplier.
        caption_size: Caption font size in points.

    Returns:
        Path to the generated video, or None on failure.
    """
    global selected_voice, voice_speed, font_size, video_clip_probability, bg_music_volume, fps, preset

    # Update global variables with user selections
    selected_voice = VOICE_CHOICES[voice]
    voice_speed = v_speed
    font_size = caption_size
    video_clip_probability = vclip_prob / 100  # Convert from percentage to decimal
    bg_music_volume = bg_vol
    fps = video_fps
    preset = video_preset

    # Stage uploaded music where the pipeline looks for it.
    # NOTE(fix): newer Gradio versions pass uploads as a filepath string
    # rather than a tempfile wrapper with a .name attribute; accept both.
    if music_file is not None:
        source_path = music_file if isinstance(music_file, str) else music_file.name
        target_path = "music.mp3"
        shutil.copy(source_path, target_path)
        print(f"Uploaded music saved as: {target_path}")

    # Generate the video
    return generate_video(user_input, resolution, caption_option)
914
+
915
# Create the Gradio interface.
# Input order must match generate_video_with_options' parameter order.
iface = gr.Interface(
    fn=generate_video_with_options,
    inputs=[
        gr.Textbox(label="Video Concept", placeholder="Enter your video concept here..."),
        gr.Radio(["Full", "Short"], label="Resolution", value="Full"),
        # Only "No" is offered, so captions are disabled in the UI for now.
        gr.Radio(["No"], label="Captions (Coming Soon)", value="No"),
        gr.File(label="Upload Background Music (MP3)", file_types=[".mp3"]),
        # Keys of VOICE_CHOICES; mapped to Kokoro voice ids in the handler.
        gr.Dropdown(choices=list(VOICE_CHOICES.keys()), label="Choose Voice", value="Emma (Female)"),
        # Percent; the handler divides by 100.
        gr.Slider(0, 100, value=25, step=1, label="Video Clip Usage Probability (%)"),
        gr.Slider(0.0, 1.0, value=0.08, step=0.01, label="Background Music Volume"),
        gr.Slider(10, 60, value=30, step=1, label="Video FPS"),
        # libx264 preset: faster presets trade file size for export speed.
        gr.Dropdown(choices=["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow"],
                    value="veryfast", label="Export Preset"),
        gr.Slider(0.5, 1.5, value=1.2, step=0.05, label="Voice Speed"),
        gr.Slider(20, 100, value=45, step=1, label="Caption Font Size")
    ],
    outputs=gr.Video(label="Generated Video"),
    title="AI Documentary Video Generator",
    description="Create short documentary videos with AI. Upload music, choose voice, and customize settings."
)

# Launch the interface
if __name__ == "__main__":
    # share=True publishes a temporary public Gradio URL.
    iface.launch(share=True)
requirements (2).txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ transformers==4.49.0
3
+ moviepy==1.0.3
4
+ gTTS
5
+ requests
6
+ pydub
7
+ pillow
8
+ kokoro>=0.3.4
9
+ soundfile
10
+ pysrt
11
+ opencv-python-headless
12
+ beautifulsoup4