Spaces: Running on Zero

malvin noel committed · Commit 0940c19 · 1 Parent(s): c6771b3
corrected app

Files changed:
- app.py                        +34 -37
- scripts/edit_video.py         +34 -44
- scripts/generate_scripts.py    +0  -1
- scripts/generate_subtitles.py +26 -11
- scripts/get_footage.py        +69 -40
app.py
CHANGED
@@ -1,4 +1,3 @@
-#app.py
 import gradio as gr
 import os
 import shutil
@@ -45,74 +44,72 @@ def process_video(
     user_music: Optional[str] = None,
     show_progress_bar: bool = True,
 ):
-    """Build the final video …
+    """Build the final video using user-defined visual parameters (brightness, contrast, gamma)."""
 
     if not accumulated_videos:
         raise ValueError("❌ Please upload at least one background video (.mp4) before generating.")
 
     approx_words = int(target_duration * WORDS_PER_SECOND)
 
-    # …
+    # --- 1. Script (AI or custom) ---
     if script_mode == "Use my script":
         if not custom_script or not custom_script.strip():
             raise ValueError("❌ You selected 'Use my script' but the script field is empty!")
         script = custom_script.strip()
+        title = generate_title(script)
+        description = generate_description(script)
     else:
         prompt = (
-            f"You are a video creation expert …
-            f"Instruction …
-            f"🔴 Strict target duration: {target_duration}s — ≈ {approx_words} words."
+            f"You are a video creation expert. Here is the context: {context.strip()}\n"
+            f"Instruction: {instruction.strip()}\n"
+            f"🔴 Strict target duration: {target_duration}s — ≈ {approx_words} words (must be respected)."
         )
         script = generate_script(prompt)
+        title = generate_title(script)
+        description = generate_description(script)
 
-    # ── 2. Prepare directories ───────────────────────────
-    for folder in ("./assets/audio", "./assets/backgrounds", "./assets/output", "./assets/video_music"):
+    # --- 2. Prepare folders ---
+    for folder in ("./assets/audio", "./assets/backgrounds", "./assets/output"):
         os.makedirs(folder, exist_ok=True)
 
-    voice_path …
-    final_no_subs …
+    voice_path = "./assets/audio/voice.mp3"
+    final_no_subs = "./assets/output/final_video.mp4"
+    final_with_subs = "./assets/output/final_video_subtitles.mp4"
 
-    # …
+    # --- 3. Copy videos ---
+    for f in os.listdir("./assets/backgrounds"):
+        if f.lower().endswith(".mp4"):
+            os.remove(os.path.join("./assets/backgrounds", f))
     for idx, v in enumerate(accumulated_videos):
         if not os.path.isfile(v) or not v.lower().endswith(".mp4"):
             raise ValueError(f"❌ Invalid file: {v}")
         safe_copy(v, os.path.join("./assets/backgrounds", f"video_{idx:03d}.mp4"))
 
-    # …
+    # --- 4. AI voice ---
     generate_voice(script, voice_path)
 
-    # …
+    # --- 5. Video montage ---
+    music_path = user_music if user_music and os.path.isfile(user_music) else None
     _, out_no_audio = get_video_montage_from_folder(
         folder_path="./assets/backgrounds",
         audio_path=voice_path,
         output_dir="./assets/video_music",
         lum=lum,
+        contrast=contrast,
+        gamma=gamma,
         show_progress_bar=show_progress_bar,
     )
 
-    # …
+    # --- 6. Mixing & subtitles ---
+    edit_video(out_no_audio, voice_path, music_path, final_no_subs)
+
     if add_subs:
         segments = transcribe_audio_to_subs(voice_path)
-        subs …
-
-    edit_video(
-        video_path   = out_no_audio,
-        audio_path   = voice_path,
-        music_path   = music_path,
-        output_path  = final_no_subs,
-        music_volume = 0.10,
-        subtitles    = subs,  # ← injected here
-    )
-
-    return script, title, description, final_no_subs
+        subs = chunk_text_by_words(segments, max_words=3)
+        add_subtitles_to_video(final_no_subs, subs, final_with_subs)
+        return script, title, description, final_with_subs
+    else:
+        return script, title, description, final_no_subs
 
 # ──────────────────────────────────────────────────────────────────────────────
 # Upload helper
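For reference, a minimal sketch of how the revised process_video() could be driven end to end. The keyword names mirror those used inside the function body above; the upload path, the Gradio wiring, and the placeholder values are assumptions, not part of this commit.

from app import process_video  # module name taken from the file shown above

# Hypothetical call: every value below is an illustrative placeholder.
script, title, description, video_path = process_video(
    script_mode="Use my script",
    custom_script="Three quick tips to stay focused at work.",
    context="",                        # only used by the AI-script branch
    instruction="",
    target_duration=30,                # seconds; ≈ 30 * WORDS_PER_SECOND words
    accumulated_videos=["./uploads/clip_01.mp4"],   # hypothetical upload
    lum=6.0, contrast=1.0, gamma=1.0,  # forwarded to get_video_montage_from_folder()
    add_subs=True,                     # Whisper transcription + burned-in subtitles
    user_music=None,                   # optional .mp3/.wav background track
    show_progress_bar=True,
)
print(video_path)  # ./assets/output/final_video_subtitles.mp4 when add_subs=True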
scripts/edit_video.py
CHANGED
@@ -1,79 +1,69 @@
-…
-    VideoFileClip, AudioFileClip, CompositeAudioClip,
-    CompositeVideoClip
-)
-…
+# ============================
+# get_footage.py (unchanged)
+# ============================
+
+# (content identical to the previous version – no changes)
+
+# ============================
+# edit_video.py (revision => optional music and configurable volume)
+# ============================
+
+"""Assemble the AI voice and, if provided, the background music.
+
+Call:
+    edit_video(
+        video_path="./assets/video_music/video_silent.mp4",
+        audio_path="./assets/audio/voice.mp3",
+        music_path=None,              # or a .mp3 / .wav path
+        output_path="./assets/output/final_video.mp4",
+        music_volume=0.10,            # music volume (0-1)
+    )
+"""
+
+from moviepy import VideoFileClip, AudioFileClip, CompositeAudioClip
+import os
 
 
 def edit_video(
     video_path: str,
     audio_path: str,
-    music_path: …
+    music_path: str | None,
     output_path: str,
     *,
     music_volume: float = 0.10,
-    subtitles: Optional[List[Dict]] = None,  # ← new
 ):
-    """
-    Final encoding: adds the voice, (optional) music and subtitles
-    in ONE single pass.
-    """
-    vid_clip = VideoFileClip(video_path)
+    video_clip = VideoFileClip(video_path)
     voice_clip = AudioFileClip(audio_path)
-
-    # ── composite audio track ─────────────────────────────────
     tracks = [voice_clip]
+
+    if music_path and os.path.isfile(music_path):
         try:
             music_clip = (
                 AudioFileClip(music_path)
                 .with_volume_scaled(music_volume)
-                .with_duration(…
+                .with_duration(video_clip.duration)
             )
             tracks.insert(0, music_clip)
         except Exception as err:
-            print(f"⚠️ …
+            print(f"⚠️ Musique ignorée : {err}")
 
-    final_audio = CompositeAudioClip(tracks).with_duration(vid_clip.duration)
-
-    if subtitles:
-        w, h = vid_clip.size
-        for sub in subtitles:
-            layers.append(
-                create_animated_subtitle_clip(
-                    sub["text"], sub["start"], sub["end"], w, h
-                )
-            )
-
-    final_clip = (
-        CompositeVideoClip(layers, size=vid_clip.size)
-        .with_duration(vid_clip.duration)
-        .with_audio(final_audio)
-    )
-
-    # ── export ────────────────────────────────────────────────
-    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
+    final_audio = CompositeAudioClip(tracks).with_duration(video_clip.duration)
+    final_clip = video_clip.with_audio(final_audio)
 
     final_clip.write_videofile(
         output_path,
         codec="libx264",
         audio_codec="aac",
         fps=30,
-        threads=…
+        threads=4,
         preset="medium",
-        ffmpeg_params=["-pix_fmt", "yuv420p"]
+        ffmpeg_params=["-pix_fmt", "yuv420p"],
     )
-    print(f"✅ …
+    print(f"✅ Vidéo générée : {output_path}")
 
-
+    video_clip.close()
     voice_clip.close()
     if "music_clip" in locals():
         music_clip.close()
     final_audio.close()
     final_clip.close()
-    vid_clip.close()
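A minimal usage sketch of the revised edit_video(), matching the call documented in the module docstring above; the import path is an assumption based on the repository layout, and the music file shown in the comment is hypothetical.

from scripts.edit_video import edit_video  # import path assumed from the repo layout

# Mix the AI voice over the silent montage; the background music track is
# optional and is skipped when music_path is None or the file does not exist.
edit_video(
    video_path="./assets/video_music/video_silent.mp4",
    audio_path="./assets/audio/voice.mp3",
    music_path=None,                                 # or e.g. "./assets/music/track.mp3" (hypothetical)
    output_path="./assets/output/final_video.mp4",
    music_volume=0.10,                               # keyword-only, 0-1 scale
)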
scripts/generate_scripts.py
CHANGED
@@ -1,4 +1,3 @@
-#generate_scripts.py
 import os
 import re
 import json
scripts/generate_subtitles.py
CHANGED
@@ -25,8 +25,7 @@ SUBTITLE_COLORS = [
     "white", "yellow", "cyan", "deeppink", "gold", "lightgreen", "magenta", "orange"
 ]
 
-…
-    device="cuda" if torch.cuda.is_available() else "cpu")
+
 
 
 def color_for_word(word: str) -> str:
@@ -86,14 +85,29 @@ def save_subtitles_to_srt(subtitles, output_path):
 
 
 @spaces.GPU()
-def transcribe_audio_to_subs(audio_path…
-    """
-…
+def transcribe_audio_to_subs(audio_path):
+    """
+    Transcribes the audio file to text (via Whisper), returns the list of
+    start/end/text segments, and saves them as an .srt file.
+    """
+    print("🎙️ Transcription avec Whisper...")
+    model = whisper.load_model("medium", device="cuda" if torch.cuda.is_available() else "cpu")
+    result = model.transcribe(audio_path)
+
+    subtitles = [{
+        "start": seg['start'],
+        "end": seg['end'],
+        "text": seg['text']
+    } for seg in result['segments']]
+
+    print(f"📝 {len(subtitles)} sous-titres générés.")
+
+    # Save as .srt
+    base_name = os.path.splitext(audio_path)[0]
+    srt_path = f"{base_name}.srt"
+    save_subtitles_to_srt(subtitles, srt_path)
+    print(f"💾 Sous-titres enregistrés dans : {srt_path}")
+
     return subtitles
 
 def format_subtitle_text(text, max_chars=50):
@@ -217,4 +231,5 @@ def add_subtitles_to_video(video_path, subtitles, output_file="./assets/output/v
         ffmpeg_params=["-pix_fmt", "yuv420p"]
     )
 
-    print(f"✅ Vidéo Shorts/TikTok prête : {output_file}")
+    print(f"✅ Vidéo Shorts/TikTok prête : {output_file}")
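A short sketch of the subtitle flow as app.py drives it. The import path is assumed from the repository layout, and chunk_text_by_words() / add_subtitles_to_video() are assumed to live in this same module because app.py calls them alongside transcribe_audio_to_subs().

from scripts.generate_subtitles import (  # import path assumed from the repo layout
    transcribe_audio_to_subs,
    chunk_text_by_words,
    add_subtitles_to_video,
)

# Whisper "medium" runs on GPU when available (@spaces.GPU() on the function above);
# the call also writes a .srt file next to the audio.
segments = transcribe_audio_to_subs("./assets/audio/voice.mp3")
subs = chunk_text_by_words(segments, max_words=3)      # short 3-word captions, as in app.py
add_subtitles_to_video("./assets/output/final_video.mp4", subs,
                       "./assets/output/final_video_subtitles.mp4")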
scripts/get_footage.py
CHANGED
@@ -23,6 +23,7 @@ from moviepy import (
     CompositeVideoClip
 )
 
+FONT_PATH = "C:/Windows/Fonts/arialbd.ttf"
 
 def add_pan_effect(clip):
     """
@@ -129,7 +130,7 @@ def apply_crossfade_effects(clips, duration=0.12):
 
 def get_video_montage_from_folder(
     folder_path: str = "./assets/videos",
-    audio_path: str…
+    audio_path: str = "./assets/audio/voice.mp3",
     output_dir: str = "./assets/backgrounds",
     lum: float = 6.0,
     contrast: float = 1.0,
@@ -137,61 +138,86 @@ def get_video_montage_from_folder(
     show_progress_bar: bool = True,
 ):
     """
-…
+    1) Walks every video file in 'folder_path'.
+    2) Builds a vertical (1080x1920) montage, applying dynamic_effect()
+       and a crossfade between each clip.
+    3) The total duration is capped at the audio duration (the excess is cut).
+    4) Exports two versions: with and without audio.
     """
 
+    # Prepare output paths
     os.makedirs(output_dir, exist_ok=True)
-    output_no_audio …
+    output_no_audio = os.path.join(output_dir, "video_silent.mp4")
 
-    # …
-…
-    print(f"🎧 Target duration (voice-over) : {audio_duration:.2f} s")
+    # Load the audio to get the target duration
+    voiceover = AudioFileClip(audio_path)
+    audio_duration = voiceover.duration
+    print(f"🎧 Durée audio : {audio_duration:.2f} s")
 
-    # …
+    # List every video file in the folder
     all_videos = [
         f for f in os.listdir(folder_path)
        if f.lower().endswith((".mp4", ".mov", ".avi", ".mkv"))
     ]
-    if not all_videos:
-        raise RuntimeError(f"❌ No videos found in {folder_path}")
-
-    clips, total_duration = [], 0.0
-
-    for video_file in all_videos:
-        clip = VideoFileClip(os.path.join(folder_path, video_file))
-
-        # Resize/crop to 1080×1920
-        target_w, target_h = 1080, 1920
-        clip_ar, target_ar = clip.w / clip.h, target_w / target_h
-        if clip_ar > target_ar:
-            clip = clip.resized(height=target_h).cropped(width=target_w, x_center=clip.w/2)
-        else:
-            clip = clip.resized(width=target_w).cropped(height=target_h, y_center=clip.h/2)
-…
+
+    if not all_videos:
+        print(f"❌ Aucune vidéo trouvée dans le dossier : {folder_path}")
+        return None, None
+
+    clips = []
+    total_duration = 0.0
+
+    # Walk the videos in order
+    for idx, video_file in enumerate(all_videos):
+        video_path = os.path.join(folder_path, video_file)
+        try:
+            clip = VideoFileClip(video_path)
+
+            # Resize to 1080x1920 (vertical)
+            target_w, target_h = 1080, 1920
+            clip_ar = clip.w / clip.h
+            target_ar = target_w / target_h
+
+            if clip_ar > target_ar:
+                # Fit the height
+                clip = clip.resized(height=target_h)
+                # Crop the width
+                clip = clip.cropped(width=target_w, x_center=clip.w / 2)
+            else:
+                # Fit the width
+                clip = clip.resized(width=target_w)
+                # Crop the height
+                clip = clip.cropped(height=target_h, y_center=clip.h / 2)
+
+            # Apply the dynamic effect
+            dynamic_clip = dynamic_effect(clip, lum, contrast, gamma)
+            clips.append(dynamic_clip)
+            total_duration += dynamic_clip.duration
+
+            # Stop the loop once the total exceeds the audio duration
+            if total_duration >= audio_duration:
+                break
+
+        except Exception as e:
+            print(f"⚠️ Erreur avec le fichier {video_file} : {e}")
 
     if not clips:
-        …
+        print("❌ Aucun clip valide. Montage impossible.")
+        return None, None
 
+    # Crossfade between clips
     clips = apply_crossfade_effects(clips, duration=0.15)
-    final_clip = concatenate_videoclips(clips, method="compose")
-    if audio_duration:
-        final_clip = final_clip.subclipped(0, audio_duration)
 
+    # Concatenate and cap the total duration at the audio duration
+    final_clip = concatenate_videoclips(clips, method="compose").subclipped(0, audio_duration)
+
+    # Overlay (e.g. progress bar)
     if show_progress_bar:
         final_clip = add_timer_overlay(final_clip)
 
-    # …
+    # --------------------
+    # 2) Version WITHOUT audio
+    # --------------------
     final_clip.write_videofile(
         output_no_audio,
         codec='libx264',
@@ -203,11 +229,14 @@ def get_video_montage_from_folder(
     )
     print(f"✅ Montage créé (SANS audio) : {output_no_audio}")
 
-    # …
+    # Free memory
     for c in clips:
         c.close()
+    voiceover.close()
     final_clip.close()
+    final_clip_with_audio.close()
 
-    return …
+    return output_no_audio
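Finally, a sketch of calling the montage builder directly. It assumes the function returns the single path seen at the end of the new version, and that gamma is a keyword parameter (the body uses it and app.py passes it); note that app.py still unpacks two values from this call, so the exact return shape may differ.

from scripts.get_footage import get_video_montage_from_folder  # import path assumed

# Build a 1080x1920 vertical montage from the background clips, trimmed to the
# voice-over duration, and write a silent MP4 into ./assets/video_music.
silent_path = get_video_montage_from_folder(
    folder_path="./assets/backgrounds",
    audio_path="./assets/audio/voice.mp3",   # used only to measure the target duration
    output_dir="./assets/video_music",
    lum=6.0, contrast=1.0, gamma=1.0,        # forwarded to dynamic_effect()
    show_progress_bar=True,                  # adds the add_timer_overlay() progress bar
)
print(silent_path)                           # ./assets/video_music/video_silent.mp4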