qqwjq1981 committed (verified)
Commit 40b3f9e · 1 Parent(s): fd83135

Upload 6 files

Files changed (7):
  1. .gitattributes +2 -0
  2. NotoSansSC-Regular.ttf +3 -0
  3. README.md +5 -4
  4. app.py +741 -0
  5. apt.txt +9 -0
  6. requirements.txt +22 -0
  7. speaker_default_sample.wav +3 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+NotoSansSC-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+speaker_default_sample.wav filter=lfs diff=lfs merge=lfs -text
NotoSansSC-Regular.ttf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5cf8b2a0576d5680284ab03a7a8219499d59bbe981a79bb3dc0031f251c39736
size 10560616
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Studio V1 Test
-emoji: 🌍
-colorFrom: blue
-colorTo: yellow
+title: studio_V1
+emoji: 🔥
+colorFrom: pink
+colorTo: red
 sdk: gradio
 sdk_version: 5.23.3
 app_file: app.py
 pinned: false
+short_description: Studio
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,741 @@
import numpy as np
import cvxpy as cp
import re
import concurrent.futures
import gradio as gr
from datetime import datetime
import random
import moviepy
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
from moviepy.editor import (
    ImageClip,
    VideoFileClip,
    TextClip,
    CompositeVideoClip,
    CompositeAudioClip,
    AudioFileClip,
    concatenate_videoclips,
    concatenate_audioclips
)
from PIL import Image, ImageDraw, ImageFont
from moviepy.audio.AudioClip import AudioArrayClip
import subprocess
import speech_recognition as sr
import json
import sqlite3
from nltk.tokenize import sent_tokenize
import logging
import whisperx
import time
import os
import openai
from openai import OpenAI
import traceback
from TTS.api import TTS
import torch
from pydub import AudioSegment
from pyannote.audio import Pipeline
import wave

# Configure logging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
logger.info(f"MoviePy Version: {moviepy.__version__}")

# Accept license terms for Coqui XTTS
os.environ["COQUI_TOS_AGREED"] = "1"
# torch.serialization.add_safe_globals([XttsConfig])

logger.info(gr.__version__)

client = OpenAI(
    api_key=os.environ.get("openAI_api_key"),  # Read from the Space secret "openAI_api_key"
)
hf_api_key = os.environ.get("hf_token")

def silence(duration, fps=44100):
    """
    Returns a silent AudioClip of the specified duration.
    """
    return AudioArrayClip(np.zeros((int(fps * duration), 2)), fps=fps)

def count_words_or_characters(text):
    # Count non-Chinese words
    non_chinese_words = len(re.findall(r'\b[a-zA-Z0-9]+\b', text))

    # Count Chinese characters
    chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', text))

    return non_chinese_words + chinese_chars
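
# Illustrative sketch (not called anywhere): how count_words_or_characters handles
# mixed Chinese/English input. The sample strings are invented for demonstration.
def _demo_count_words_or_characters():
    assert count_words_or_characters("hello world") == 2      # two Latin words
    assert count_words_or_characters("你好世界") == 4           # four Chinese characters
    assert count_words_or_characters("hello 世界 123") == 4     # 2 words + 2 characters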

# Define the passcode
PASSCODE = "show_feedback_db"

css = """
/* Adjust row height */
.dataframe-container tr {
    height: 50px !important;
}

/* Ensure text wrapping and prevent overflow */
.dataframe-container td {
    white-space: normal !important;
    word-break: break-word !important;
}

/* Set column widths */
[data-testid="block-container"] .scrolling-dataframe th:nth-child(1),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(1) {
    width: 6%; /* Start column */
}

[data-testid="block-container"] .scrolling-dataframe th:nth-child(2),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(2) {
    width: 47%; /* Original text */
}

[data-testid="block-container"] .scrolling-dataframe th:nth-child(3),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(3) {
    width: 47%; /* Translated text */
}

[data-testid="block-container"] .scrolling-dataframe th:nth-child(4),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(4) {
    display: none !important;
}
"""

# Function to save feedback or provide access to the database file
def handle_feedback(feedback):
    feedback = feedback.strip()  # Clean up leading/trailing whitespace
    if not feedback:
        return "Feedback cannot be empty.", None

    if feedback == PASSCODE:
        # Provide access to the feedback.db file
        return "Access granted! Download the database file below.", "feedback.db"
    else:
        # Save feedback to the database
        with sqlite3.connect("feedback.db") as conn:
            cursor = conn.cursor()
            cursor.execute("CREATE TABLE IF NOT EXISTS studio_feedback (id INTEGER PRIMARY KEY, comment TEXT)")
            cursor.execute("INSERT INTO studio_feedback (comment) VALUES (?)", (feedback,))
            conn.commit()
        return "Thank you for your feedback!", None

def segment_background_audio(audio_path, background_audio_path="background_segments.wav"):
    # Use a distinct name so the transformers `pipeline` import is not shadowed
    vad_pipeline = Pipeline.from_pretrained("pyannote/voice-activity-detection", use_auth_token=hf_api_key)
    vad_result = vad_pipeline(audio_path)

    full_audio = AudioSegment.from_wav(audio_path)
    full_duration_sec = len(full_audio) / 1000.0

    current_time = 0.0
    result_audio = AudioSegment.empty()

    for segment in vad_result.itersegments():
        # Background segment before the speech
        if current_time < segment.start:
            bg = full_audio[int(current_time * 1000):int(segment.start * 1000)]
            result_audio += bg
        # Add silence for the speech duration
        silence_duration = segment.end - segment.start
        result_audio += AudioSegment.silent(duration=int(silence_duration * 1000))
        current_time = segment.end

    # Handle any remaining background after the last speech
    if current_time < full_duration_sec:
        result_audio += full_audio[int(current_time * 1000):]

    result_audio.export(background_audio_path, format="wav")
    return background_audio_path
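
# Illustrative sketch (not called anywhere): the background track produced above should
# be as long as the source audio, since speech spans are replaced by equal-length silence.
def _check_background_duration(audio_path, background_audio_path="background_segments.wav"):
    src = AudioSegment.from_wav(audio_path)
    bg = AudioSegment.from_wav(background_audio_path)
    logger.debug("source: %d ms, background: %d ms", len(src), len(bg))
    return abs(len(src) - len(bg)) < 50  # small tolerance for millisecond rounding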

def transcribe_video_with_speakers(video_path):
    # Extract audio from video
    video = VideoFileClip(video_path)
    audio_path = "audio.wav"
    video.audio.write_audiofile(audio_path)
    logger.info(f"Audio extracted from video: {audio_path}")

    segment_result = segment_background_audio(audio_path)
    logger.info(f"Saved non-speech (background) audio to {segment_result}")

    # Set up device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    logger.info(f"Using device: {device}")

    try:
        # Load the large-v3 model with float32 for broader compatibility
        model = whisperx.load_model("large-v3", device=device, compute_type="float32")
        logger.info("WhisperX model loaded")

        # Transcribe
        result = model.transcribe(audio_path, chunk_size=6, print_progress=True)
        logger.info("Audio transcription completed")

        # Get the detected language
        detected_language = result["language"]
        logger.debug(f"Detected language: {detected_language}")

        # Alignment
        model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
        result = whisperx.align(result["segments"], model_a, metadata, audio_path, device)
        logger.info("Transcription alignment completed")

        # Diarization (works independently of Whisper model size)
        diarize_model = whisperx.DiarizationPipeline(use_auth_token=hf_api_key, device=device)
        diarize_segments = diarize_model(audio_path)
        logger.info("Speaker diarization completed")

        # Assign speakers
        result = whisperx.assign_word_speakers(diarize_segments, result)
        logger.info("Speakers assigned to transcribed segments")

    except Exception as e:
        logger.error(f"❌ WhisperX pipeline failed: {e}")
        # Re-raise: the steps below need a successful transcription
        raise

    # Extract timestamps, text, and speaker IDs
    transcript_with_speakers = [
        {
            "start": segment["start"],
            "end": segment["end"],
            "text": segment["text"],
            "speaker": segment["speaker"]
        }
        for segment in result["segments"]
    ]

    # Collect audio for each speaker
    speaker_audio = {}
    for segment in result["segments"]:
        speaker = segment["speaker"]
        if speaker not in speaker_audio:
            speaker_audio[speaker] = []
        speaker_audio[speaker].append((segment["start"], segment["end"]))

    # Collapse and truncate speaker audio
    speaker_sample_paths = {}
    audio_clip = AudioFileClip(audio_path)
    for speaker, segments in speaker_audio.items():
        speaker_clips = [audio_clip.subclip(start, end) for start, end in segments]
        combined_clip = concatenate_audioclips(speaker_clips)
        truncated_clip = combined_clip.subclip(0, min(30, combined_clip.duration))
        sample_path = f"speaker_{speaker}_sample.wav"
        truncated_clip.write_audiofile(sample_path)
        speaker_sample_paths[speaker] = sample_path
        logger.info(f"Created sample for {speaker}: {sample_path}")

    # Clean up
    video.close()
    audio_clip.close()
    os.remove(audio_path)

    return transcript_with_speakers, detected_language
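
# Illustrative sketch (not called anywhere): the shape of the data returned above.
# Timestamps, text and speaker labels are invented for demonstration only.
def _example_transcript():
    transcript = [
        {"start": 0.00, "end": 2.40, "text": "Hello and welcome.", "speaker": "SPEAKER_00"},
        {"start": 2.40, "end": 5.10, "text": "Thanks for having me.", "speaker": "SPEAKER_01"},
    ]
    detected_language = "en"
    return transcript, detected_language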

# Function to get the appropriate translation model based on source and target language
def get_translation_model(source_language, target_language):
    """
    Get the translation model based on the source and target language.

    Parameters:
    - source_language (str): The language of the input content (e.g., 'en').
    - target_language (str): The language to translate the content into (e.g., 'es', 'fr').

    Returns:
    - str: The translation model identifier.
    """
    # List of allowable languages
    allowable_languages = ["en", "es", "fr", "zh", "de", "it", "pt", "ja", "ko", "ru"]

    # Validate source and target languages
    if source_language not in allowable_languages:
        logger.debug(f"Invalid source language '{source_language}'. Supported languages are: {', '.join(allowable_languages)}")
        source_language = "en"  # Fall back to 'en'

    if target_language not in allowable_languages:
        logger.debug(f"Invalid target language '{target_language}'. Supported languages are: {', '.join(allowable_languages)}")
        target_language = "zh"  # Fall back to 'zh'

    if source_language == target_language:
        source_language = "en"  # Default to 'en'
        target_language = "zh"  # Default to 'zh'

    # Build the Helsinki-NLP OPUS-MT model identifier
    return f"Helsinki-NLP/opus-mt-{source_language}-{target_language}"

def translate_single_entry(entry, translator):
    original_text = entry["text"]
    translated_text = translator(original_text)[0]['translation_text']
    return {
        "start": entry["start"],
        "original": original_text,
        "translated": translated_text,
        "end": entry["end"],
        "speaker": entry["speaker"]
    }

def translate_text(transcription_json, source_language, target_language):
    # Load the translation model for the specified target language
    translation_model_id = get_translation_model(source_language, target_language)
    logger.debug(f"Translation model: {translation_model_id}")
    translator = pipeline("translation", model=translation_model_id)

    # Use ThreadPoolExecutor to parallelize translations
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Submit all translation tasks and collect results
        translate_func = lambda entry: translate_single_entry(entry, translator)
        translated_json = list(executor.map(translate_func, transcription_json))

    # Sort the translated_json by start time
    translated_json.sort(key=lambda x: x["start"])

    # Log the components being added to translated_json
    for entry in translated_json:
        logger.debug("Added to translated_json: start=%s, original=%s, translated=%s, end=%s, speaker=%s",
                     entry["start"], entry["original"], entry["translated"], entry["end"], entry["speaker"])

    return translated_json

def update_translations(file, edited_table, process_mode):
    """
    Update the translations based on user edits in the Gradio Dataframe.
    """
    output_video_path = "output_video.mp4"
    logger.debug(f"Editable Table: {edited_table}")

    if file is None:
        logger.info("No file uploaded. Please upload a video/audio file.")
        return None, "No file uploaded. Please upload a video/audio file."

    try:
        start_time = time.time()  # Start the timer

        # Convert the edited table (pandas DataFrame) back to a list of dictionaries
        updated_translations = [
            {
                "start": row["start"],  # Access by column name
                "original": row["original"],
                "translated": row["translated"],
                "end": row["end"]
            }
            for _, row in edited_table.iterrows()
        ]

        # Call the function to process the video with updated translations
        add_transcript_voiceover(file.name, updated_translations, output_video_path, process_mode)

        # Calculate elapsed time
        elapsed_time = time.time() - start_time
        elapsed_time_display = f"Updates applied successfully in {elapsed_time:.2f} seconds."

        return output_video_path, elapsed_time_display

    except Exception as e:
        raise ValueError(f"Error updating translations: {e}")

def create_subtitle_clip_pil(text, start_time, end_time, video_width, video_height, font_path):
    try:
        subtitle_width = int(video_width * 0.8)
        aspect_ratio = video_height / video_width
        if aspect_ratio > 1.2:  # Portrait video
            subtitle_font_size = int(video_width // 22)
        else:  # Landscape video
            subtitle_font_size = int(video_height // 24)

        font = ImageFont.truetype(font_path, subtitle_font_size)

        dummy_img = Image.new("RGBA", (subtitle_width, 1), (0, 0, 0, 0))
        draw = ImageDraw.Draw(dummy_img)

        lines = []
        line = ""
        for word in text.split():
            test_line = f"{line} {word}".strip()
            bbox = draw.textbbox((0, 0), test_line, font=font)
            w = bbox[2] - bbox[0]
            if w <= subtitle_width - 10:
                line = test_line
            else:
                lines.append(line)
                line = word
        lines.append(line)

        line_heights = [draw.textbbox((0, 0), l, font=font)[3] - draw.textbbox((0, 0), l, font=font)[1] for l in lines]
        total_height = sum(line_heights) + (len(lines) - 1) * 5
        img = Image.new("RGBA", (subtitle_width, total_height), (0, 0, 0, 0))
        draw = ImageDraw.Draw(img)

        y = 0
        for idx, line in enumerate(lines):
            bbox = draw.textbbox((0, 0), line, font=font)
            w = bbox[2] - bbox[0]
            draw.text(((subtitle_width - w) // 2, y), line, font=font, fill="yellow")
            y += line_heights[idx] + 5

        img_np = np.array(img)  # Convert the PIL image to a NumPy array for ImageClip
        txt_clip = ImageClip(img_np).set_start(start_time).set_duration(end_time - start_time).set_position("bottom").set_opacity(0.8)
        return txt_clip
    except Exception as e:
        logger.error(f"❌ Failed to create subtitle clip: {e}")
        return None
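
# Illustrative sketch (not called anywhere): rendering one subtitle clip with the bundled
# NotoSansSC font for a 720p landscape frame. Text and timings are examples only.
def _demo_subtitle_clip():
    clip = create_subtitle_clip_pil(
        "你好，世界 hello world",  # mixed-script sample text
        start_time=0.0,
        end_time=2.5,
        video_width=1280,
        video_height=720,
        font_path="./NotoSansSC-Regular.ttf",
    )
    return clip  # an ImageClip anchored at the bottom, or None on failure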

def solve_optimal_alignment(original_segments, generated_durations, total_duration):
    """
    Align generated speech segments to the original timeline by solving a small
    quadratic program; fall back to greedy sequential allocation if the solver fails.
    Modifies and returns original_segments with updated 'start' and 'end'.
    """
    N = len(original_segments)
    d = np.array(generated_durations)
    m = np.array([(seg['start'] + seg['end']) / 2 for seg in original_segments])

    try:
        s = cp.Variable(N)
        objective = cp.Minimize(cp.sum_squares(s + d / 2 - m))

        constraints = [s[0] >= 0]
        for i in range(N - 1):
            constraints.append(s[i] + d[i] <= s[i + 1])
        constraints.append(s[N - 1] + d[N - 1] == total_duration)

        problem = cp.Problem(objective, constraints)
        problem.solve()

        if s.value is None:
            raise ValueError("Solver failed")

        for i in range(N):
            original_segments[i]['start'] = round(s.value[i], 3)
            original_segments[i]['end'] = round(s.value[i] + d[i], 3)

    except Exception as e:
        print(f"⚠️ Optimization failed: {e}, falling back to greedy alignment.")

        current_time = 0.0
        for i in range(N):
            original_segments[i]['start'] = round(current_time, 3)
            original_segments[i]['end'] = round(current_time + generated_durations[i], 3)
            current_time += generated_durations[i]

    return original_segments
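
# Illustrative sketch (not called anywhere): aligning two generated segments inside a
# 10-second timeline. Numbers are toy values; the solver keeps segments ordered and
# non-overlapping, and pins the end of the last one to total_duration.
def _demo_solve_optimal_alignment():
    segments = [
        {"start": 0.0, "end": 4.0},  # original timing of segment 1
        {"start": 5.0, "end": 9.0},  # original timing of segment 2
    ]
    durations = [3.0, 5.0]           # durations of the generated voiceovers
    aligned = solve_optimal_alignment(segments, durations, total_duration=10.0)
    logger.debug(f"Aligned segments: {aligned}")
    return aligned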

def process_entry(entry, i, tts_model, video_width, video_height, process_mode, target_language, font_path, speaker_sample_paths=None):
    logger.debug(f"Processing entry {i}: {entry}")
    error_message = None

    try:
        txt_clip = create_subtitle_clip_pil(entry["translated"], entry["start"], entry["end"], video_width, video_height, font_path)
    except Exception as e:
        error_message = f"❌ Failed to create subtitle clip for entry {i}: {e}"
        logger.error(error_message)
        txt_clip = None

    audio_segment = None
    actual_duration = 0.0
    if process_mode > 1:
        try:
            segment_audio_path = f"segment_{i}_voiceover.wav"
            desired_duration = entry["end"] - entry["start"]
            desired_speed = calibrated_speed(entry['translated'], desired_duration)

            speaker = entry.get("speaker", "default")
            speaker_wav_path = f"speaker_{speaker}_sample.wav"

            # Clone languages are only known when an XTTS model is loaded (voice-clone mode)
            supported_languages = (
                tts_model.synthesizer.tts_model.language_manager.name_to_id.keys()
                if tts_model is not None else []
            )

            if process_mode > 2 and speaker_wav_path and os.path.exists(speaker_wav_path) and target_language in supported_languages:
                generate_voiceover_clone(entry['translated'], tts_model, desired_speed, target_language, speaker_wav_path, segment_audio_path)
            else:
                generate_voiceover_OpenAI(entry['translated'], target_language, desired_speed, segment_audio_path)

            if not segment_audio_path or not os.path.exists(segment_audio_path):
                raise FileNotFoundError(f"Voiceover file not generated at: {segment_audio_path}")

            audio_clip = AudioFileClip(segment_audio_path)
            actual_duration = audio_clip.duration

            audio_segment = audio_clip  # Do not set start here, alignment happens later

        except Exception as e:
            err = f"❌ Failed to generate audio segment for entry {i}: {e}"
            logger.error(err)
            error_message = error_message + " | " + err if error_message else err
            audio_segment = None

    return i, txt_clip, audio_segment, actual_duration, error_message


def add_transcript_voiceover(video_path, translated_json, output_path, process_mode, target_language="en", speaker_sample_paths=None, background_audio_path="background_segments.wav"):

    video = VideoFileClip(video_path)
    font_path = "./NotoSansSC-Regular.ttf"

    text_clips = []
    audio_segments = []
    actual_durations = []
    error_messages = []

    if process_mode == 3:
        global tts_model
        if tts_model is None:
            try:
                print("🔄 Loading XTTS model...")
                from TTS.api import TTS
                tts_model = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts")
                print("✅ XTTS model loaded successfully.")
            except Exception as e:
                print("❌ Error loading XTTS model:")
                traceback.print_exc()
                return f"Error loading XTTS model: {e}"

    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_entry, entry, i, tts_model, video.w, video.h, process_mode, target_language, font_path, speaker_sample_paths)
                   for i, entry in enumerate(translated_json)]

        results = []
        for future in concurrent.futures.as_completed(futures):
            try:
                i, txt_clip, audio_segment, actual_duration, error = future.result()
                results.append((i, txt_clip, audio_segment, actual_duration))
                if error:
                    error_messages.append(f"[Entry {i}] {error}")
            except Exception as e:
                err = f"❌ Unexpected error in future result: {e}"
                error_messages.append(err)

    results.sort(key=lambda x: x[0])
    text_clips = [clip for _, clip, _, _ in results if clip]
    generated_durations = [dur for _, _, _, dur in results if dur > 0]

    # Align using optimization (modifies translated_json in-place). Only meaningful when
    # every entry produced a voiceover; otherwise keep the original timings.
    if process_mode > 1 and len(generated_durations) == len(translated_json):
        translated_json = solve_optimal_alignment(translated_json, generated_durations, video.duration)

    # Set aligned timings
    audio_segments = []
    for i, entry in enumerate(translated_json):
        segment = results[i][2]  # AudioFileClip
        if segment:
            segment = segment.set_start(entry['start']).set_duration(entry['end'] - entry['start'])
            audio_segments.append(segment)

    final_video = CompositeVideoClip([video] + text_clips)

    if process_mode > 1 and audio_segments:
        try:
            voice_audio = CompositeAudioClip(audio_segments).set_duration(video.duration)

            if background_audio_path and os.path.exists(background_audio_path):
                background_audio = AudioFileClip(background_audio_path).set_duration(video.duration)
                final_audio = CompositeAudioClip([voice_audio, background_audio])
            else:
                final_audio = voice_audio

            final_video = final_video.set_audio(final_audio)

        except Exception as e:
            print(f"❌ Failed to set audio: {e}")

    final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")

    return error_messages

def generate_voiceover_OpenAI(full_text, language, desired_speed, output_audio_path):
    """
    Generate voiceover from translated text for a given language using OpenAI TTS API.
    """
    # Define the voice based on the language (for now, use 'alloy' as default)
    voice = "alloy"  # Adjust based on language if needed

    # Define the model (use tts-1 for real-time applications)
    model = "tts-1"

    max_retries = 3
    retry_count = 0

    while retry_count < max_retries:
        try:
            # Create the speech using OpenAI TTS API
            response = client.audio.speech.create(
                model=model,
                voice=voice,
                input=full_text,
                speed=desired_speed
            )
            # Save the audio to the specified path
            with open(output_audio_path, 'wb') as f:
                for chunk in response.iter_bytes():
                    f.write(chunk)
            logging.info(f"Voiceover generated successfully for {output_audio_path}")
            break

        except Exception as e:
            retry_count += 1
            logging.error(f"Error generating voiceover (retry {retry_count}/{max_retries}): {e}")
            time.sleep(5)  # Wait 5 seconds before retrying

    if retry_count == max_retries:
        raise ValueError(f"Failed to generate voiceover after {max_retries} retries.")

def generate_voiceover_clone(full_text, tts_model, desired_speed, target_language, speaker_wav_path, output_audio_path):
    try:
        tts_model.tts_to_file(
            text=full_text,
            speaker_wav=speaker_wav_path,
            language=target_language,
            file_path=output_audio_path,
            speed=desired_speed,
            split_sentences=True
        )
        msg = "✅ Voice cloning completed successfully."
        logger.info(msg)
        return output_audio_path, msg, None

    except Exception as e:
        # Fall back to the premium (OpenAI) voice if cloning fails
        generate_voiceover_OpenAI(full_text, target_language, desired_speed, output_audio_path)
        err_msg = f"❌ Voice cloning failed: {str(e)}; fell back to premium voice."
        logger.error(traceback.format_exc())
        return None, err_msg, err_msg

def calibrated_speed(text, desired_duration):
    """
    Compute a speed factor to help TTS fit audio into desired duration,
    using a simple truncated linear function of characters per second.
    """
    char_count = len(text.strip())
    if char_count == 0 or desired_duration <= 0:
        return 1.0  # fallback

    cps = char_count / desired_duration  # characters per second

    # Truncated linear mapping
    if cps < 14:
        return 1.0
    elif cps > 30:
        return 2.0
    else:
        slope = (2.0 - 1.0) / (30 - 14)
        return 1.0 + slope * (cps - 14)
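
# Illustrative sketch (not called anywhere): sample points of the truncated linear mapping
# above (up to 14 chars/sec -> 1.0x, 30 chars/sec and above -> 2.0x, linear in between).
def _demo_calibrated_speed():
    assert calibrated_speed("a" * 10, 1.0) == 1.0   # 10 cps: below the lower knee
    assert calibrated_speed("a" * 22, 1.0) == 1.5   # 22 cps: halfway up the ramp
    assert calibrated_speed("a" * 40, 1.0) == 2.0   # 40 cps: clamped at the maximum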

def upload_and_manage(file, target_language, process_mode):
    if file is None:
        logger.info("No file uploaded. Please upload a video/audio file.")
        return [], None, "No file uploaded. Please upload a video/audio file."

    try:
        start_time = time.time()  # Start the timer
        logger.info(f"Started processing file: {file.name}")

        # Define paths for audio and output files
        audio_path = "audio.wav"
        output_video_path = "output_video.mp4"
        voiceover_path = "voiceover.wav"
        logger.info(f"Using audio path: {audio_path}, output video path: {output_video_path}, voiceover path: {voiceover_path}")

        # Step 1: Transcribe audio from uploaded media file and get timestamps
        logger.info("Transcribing audio...")
        transcription_json, source_language = transcribe_video_with_speakers(file.name)
        logger.info(f"Transcription completed. Detected source language: {source_language}")

        # Step 2: Translate the transcription
        logger.info(f"Translating transcription from {source_language} to {target_language}...")
        translated_json = translate_text(transcription_json, source_language, target_language)
        logger.info(f"Translation completed. Number of translated segments: {len(translated_json)}")

        # Step 3: Add transcript to video based on timestamps
        logger.info("Adding translated transcript to video...")
        add_transcript_voiceover(file.name, translated_json, output_video_path, process_mode, target_language)
        logger.info(f"Transcript added to video. Output video saved at {output_video_path}")

        # Convert translated JSON into a format for the editable table
        logger.info("Converting translated JSON into editable table format...")
        editable_table = [
            [float(entry["start"]), entry["original"], entry["translated"], float(entry["end"]), entry["speaker"]]
            for entry in translated_json
        ]

        # Calculate elapsed time
        elapsed_time = time.time() - start_time
        elapsed_time_display = f"Processing completed in {elapsed_time:.2f} seconds."
        logger.info(f"Processing completed in {elapsed_time:.2f} seconds.")

        return editable_table, output_video_path, elapsed_time_display

    except Exception as e:
        logger.error(f"An error occurred: {str(e)}")
        return [], None, f"An error occurred: {str(e)}"

# Gradio interface
def build_interface():
    with gr.Blocks(css=css) as demo:
        gr.Markdown("## Video Localization")
        with gr.Row():
            with gr.Column(scale=4):
                file_input = gr.File(label="Upload Video/Audio File")
                language_input = gr.Dropdown(["en", "es", "fr", "zh"], label="Select Language")  # Language codes
                process_mode = gr.Radio(
                    choices=[
                        ("Transcription Only", 1),
                        ("Transcription with Premium Voice", 2),
                        ("Transcription with Voice Clone", 3)
                    ],
                    label="Choose Processing Type",
                    value=1
                )
                submit_button = gr.Button("Post and Process")

            with gr.Column(scale=8):
                gr.Markdown("## Edit Translations")

                # Editable translation table
                editable_table = gr.Dataframe(
                    value=[],  # Default to an empty list to avoid undefined values
                    headers=["start", "original", "translated", "end", "speaker"],
                    datatype=["number", "str", "str", "number", "str"],
                    row_count=1,  # Initially empty
                    col_count=5,
                    interactive=[False, True, True, False, False],  # Control editability
                    label="Edit Translations",
                    wrap=True  # Enables text wrapping if supported
                )
                save_changes_button = gr.Button("Save Changes")
                processed_video_output = gr.File(label="Download Processed Video", interactive=True)  # Download button
                elapsed_time_display = gr.Textbox(label="Elapsed Time", lines=1, interactive=False)

            with gr.Column(scale=1):
                gr.Markdown("**Feedback**")
                feedback_input = gr.Textbox(
                    placeholder="Leave your feedback here...",
                    label=None,
                    lines=3,
                )
                feedback_btn = gr.Button("Submit Feedback")
                response_message = gr.Textbox(label=None, lines=1, interactive=False)
                db_download = gr.File(label="Download Database File", visible=False)

        # Link the feedback handling
        def feedback_submission(feedback):
            message, file_path = handle_feedback(feedback)
            if file_path:
                return message, gr.update(value=file_path, visible=True)
            return message, gr.update(visible=False)

        save_changes_button.click(
            update_translations,
            inputs=[file_input, editable_table, process_mode],
            outputs=[processed_video_output, elapsed_time_display]
        )

        submit_button.click(
            upload_and_manage,
            inputs=[file_input, language_input, process_mode],
            outputs=[editable_table, processed_video_output, elapsed_time_display]
        )

        # Connect the feedback button to the feedback handler
        feedback_btn.click(
            feedback_submission,
            inputs=[feedback_input],
            outputs=[response_message, db_download]
        )

    return demo

tts_model = None
# Launch the Gradio interface
demo = build_interface()
demo.launch()
apt.txt ADDED
@@ -0,0 +1,9 @@
espeak
ffmpeg
libsm6
libxext6
git
git-lfs
libgl1-mesa-glx
cmake
rsync
requirements.txt ADDED
@@ -0,0 +1,22 @@
# Core compatibility
numpy==1.26.4
transformers==4.49.0
# Coqui TTS (XTTS v2)
coqpit-config
coqui-tts==0.26.0
coqui-tts-trainer==0.2.3
torch==2.6.0  # Or the version best suited for your GPU/CPU
# Video Processing
moviepy==1.0.3
# Web Interface
gradio==5.23.3
# Audio Utilities (optional but often used)
soundfile
librosa
SpeechRecognition
whisperx==3.3.1
openai
pillow
cvxpy
# pyannote.audio
# torchaudio
speaker_default_sample.wav ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d63e6190a950695c5cfa697f263c230e6f682be8822971ccaea67a8318a2d747
size 1800056