Bils committed on
Commit 464b686 · verified · 1 Parent(s): a92463e

Update app.py

Files changed (1)
  1. app.py +271 -324
app.py CHANGED
@@ -13,366 +13,313 @@ from pydub import AudioSegment
 from dotenv import load_dotenv
 import tempfile
 import spaces
-
-# Coqui TTS
 from TTS.api import TTS
 
-# ---------------------------------------------------------------------
-# Load Environment Variables
-# ---------------------------------------------------------------------
 load_dotenv()
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-# ---------------------------------------------------------------------
-# Global Model Caches
-# ---------------------------------------------------------------------
-LLAMA_PIPELINES = {}
-MUSICGEN_MODELS = {}
-TTS_MODELS = {}
-
-# ---------------------------------------------------------------------
-# Helper Functions
-# ---------------------------------------------------------------------
-def get_llama_pipeline(model_id: str, token: str):
-    """
-    Returns a cached LLaMA pipeline if available; otherwise, loads it.
-    """
-    if model_id in LLAMA_PIPELINES:
-        return LLAMA_PIPELINES[model_id]
-
-    tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id,
-        use_auth_token=token,
-        torch_dtype=torch.float16,
-        device_map="auto",
-        trust_remote_code=True,
-    )
-    text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    LLAMA_PIPELINES[model_id] = text_pipeline
-    return text_pipeline
-
-
-def get_musicgen_model(model_key: str = "facebook/musicgen-large"):
-    """
-    Returns a cached MusicGen model if available; otherwise, loads it.
-    Uses the 'large' variant for higher quality outputs.
-    """
-    if model_key in MUSICGEN_MODELS:
-        return MUSICGEN_MODELS[model_key]
-
-    model = MusicgenForConditionalGeneration.from_pretrained(model_key)
-    processor = AutoProcessor.from_pretrained(model_key)
-
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    model.to(device)
-    MUSICGEN_MODELS[model_key] = (model, processor)
-    return model, processor
-
-
-def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
-    """
-    Returns a cached TTS model if available; otherwise, loads it.
-    """
-    if model_name in TTS_MODELS:
-        return TTS_MODELS[model_name]
-
-    tts_model = TTS(model_name)
-    TTS_MODELS[model_name] = tts_model
-    return tts_model
-
-
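All three removed helpers follow the same load-once-and-memoize pattern: check a module-level dict, load on a miss, and return the cached object ever after. A minimal standalone sketch of the idiom (`expensive_load` is an illustrative stand-in for a slow `from_pretrained(...)` call):

```python
_CACHE = {}

def expensive_load(name: str) -> dict:
    # Stand-in for a slow model download/initialization.
    return {"name": name}

def get_model(name: str) -> dict:
    if name not in _CACHE:              # load only on the first request
        _CACHE[name] = expensive_load(name)
    return _CACHE[name]                 # later calls reuse the same object

assert get_model("demo") is get_model("demo")
```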
-# ---------------------------------------------------------------------
-# Script Generation Function
-# ---------------------------------------------------------------------
-@spaces.GPU(duration=100)
-def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
-    """
-    Generates a script, sound design suggestions, and music ideas from a user prompt.
-    Returns a tuple of strings: (voice_script, sound_design, music_suggestions).
-    """
     try:
-        text_pipeline = get_llama_pipeline(model_id, token)
-
-        system_prompt = (
-            "You are an expert radio imaging producer specializing in sound design and music. "
-            f"Based on the user's concept and the selected duration of {duration} seconds, produce the following: "
-            "1. A concise voice-over script. Prefix this section with 'Voice-Over Script:'.\n"
-            "2. Suggestions for sound design. Prefix this section with 'Sound Design Suggestions:'.\n"
-            "3. Music styles or track recommendations. Prefix this section with 'Music Suggestions:'."
         )
-        combined_prompt = f"{system_prompt}\nUser concept: {user_prompt}\nOutput:"
-
-        with torch.inference_mode():
-            result = text_pipeline(
-                combined_prompt,
-                max_new_tokens=300,
-                do_sample=True,
-                temperature=0.8
-            )
-
-        generated_text = result[0]["generated_text"]
-        if "Output:" in generated_text:
-            generated_text = generated_text.split("Output:")[-1].strip()
-
-        # Default placeholders
-        voice_script = "No voice-over script found."
-        sound_design = "No sound design suggestions found."
-        music_suggestions = "No music suggestions found."
-
-        # Voice-Over Script
-        if "Voice-Over Script:" in generated_text:
-            parts = generated_text.split("Voice-Over Script:")
-            voice_script_part = parts[1]
-            if "Sound Design Suggestions:" in voice_script_part:
-                voice_script = voice_script_part.split("Sound Design Suggestions:")[0].strip()
-            else:
-                voice_script = voice_script_part.strip()
-
-        # Sound Design
-        if "Sound Design Suggestions:" in generated_text:
-            parts = generated_text.split("Sound Design Suggestions:")
-            sound_design_part = parts[1]
-            if "Music Suggestions:" in sound_design_part:
-                sound_design = sound_design_part.split("Music Suggestions:")[0].strip()
-            else:
-                sound_design = sound_design_part.strip()
-
-        # Music Suggestions
-        if "Music Suggestions:" in generated_text:
-            parts = generated_text.split("Music Suggestions:")
-            music_suggestions = parts[1].strip()
-
-        return voice_script, sound_design, music_suggestions
     except Exception as e:
-        return f"Error generating script: {e}", "", ""
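The removed parser carves the generated text into sections by splitting on the literal prefix markers the prompt asks the model to emit. A quick illustration of that splitting logic on a made-up model output (the sample text is hypothetical):

```python
generated_text = (
    "Voice-Over Script: Wake up with Mornings with Max!\n"
    "Sound Design Suggestions: alarm clock, birds, coffee pour\n"
    "Music Suggestions: upbeat acoustic pop, 120 BPM"
)

voice = (
    generated_text.split("Voice-Over Script:")[1]      # text after the marker
    .split("Sound Design Suggestions:")[0]             # ...up to the next marker
    .strip()
)
print(voice)  # -> "Wake up with Mornings with Max!"
```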
-
 
-# ---------------------------------------------------------------------
-# Voice-Over Generation Function
-# ---------------------------------------------------------------------
 @spaces.GPU(duration=100)
-def generate_voice(script: str, tts_model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
-    """
-    Generates a voice-over from the provided script using the Coqui TTS model.
-    Returns the file path to the generated .wav file.
-    """
     try:
         if not script.strip():
-            return "Error: No script provided."
-
-        tts_model = get_tts_model(tts_model_name)
-
-        # Generate and save voice
-        output_path = os.path.join(tempfile.gettempdir(), "voice_over.wav")
-        tts_model.tts_to_file(text=script, file_path=output_path)
         return output_path
-
     except Exception as e:
-        return f"Error generating voice: {e}"
-
 
-# ---------------------------------------------------------------------
-# Music Generation Function
-# ---------------------------------------------------------------------
-@spaces.GPU(duration=100)
-def generate_music(prompt: str, audio_length: int):
-    """
-    Generates music from the 'facebook/musicgen-large' model based on the prompt.
-    Returns the file path to the generated .wav file.
-    """
     try:
-        if not prompt.strip():
-            return "Error: No music suggestion provided."
-
-        model_key = "facebook/musicgen-large"
-        musicgen_model, musicgen_processor = get_musicgen_model(model_key)
-
         device = "cuda" if torch.cuda.is_available() else "cpu"
-        inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
-
-        with torch.inference_mode():
-            outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)
-
-        audio_data = outputs[0, 0].cpu().numpy()
-        normalized_audio = (audio_data / max(abs(audio_data)) * 32767).astype("int16")
-
-        output_path = f"{tempfile.gettempdir()}/musicgen_large_generated_music.wav"
-        write(output_path, 44100, normalized_audio)
         return output_path
-
     except Exception as e:
-        return f"Error generating music: {e}"
-
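The removed version peak-normalizes MusicGen's float output and casts it to 16-bit PCM before writing. Note that it writes the file at 44100 Hz even though MusicGen produces 32 kHz audio (the new code later in this diff writes at 32000). A minimal sketch of the same float-to-int16 conversion with NumPy:

```python
import numpy as np

def to_int16(audio: np.ndarray) -> np.ndarray:
    # Scale so the loudest sample hits full scale, then cast to 16-bit PCM.
    peak = np.max(np.abs(audio))
    return (audio / peak * 32767).astype(np.int16)

samples = to_int16(np.array([0.5, -1.0, 0.25], dtype=np.float32))
print(samples)  # -> [ 16383 -32767   8191]
```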
 
-# ---------------------------------------------------------------------
-# Audio Blending with Duration Sync & Ducking
-# ---------------------------------------------------------------------
-@spaces.GPU(duration=100)
-def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
-    """
-    Blends two audio files (voice and music).
-    1. If music < voice, loops the music until it meets/exceeds the voice duration.
-    2. If music > voice, trims music to the voice duration.
-    3. If ducking=True, the music is attenuated by 'duck_level' dB while the voice is playing.
-    Returns the file path to the blended .wav file.
-    """
     try:
-        if not os.path.isfile(voice_path) or not os.path.isfile(music_path):
-            return "Error: Missing audio files for blending."
-
         voice = AudioSegment.from_wav(voice_path)
         music = AudioSegment.from_wav(music_path)
-
-        voice_len = len(voice)  # in milliseconds
-        music_len = len(music)  # in milliseconds
-
-        # 1) If the music is shorter than the voice, loop it:
-        if music_len < voice_len:
-            looped_music = AudioSegment.empty()
-            # Keep appending until we exceed voice length
-            while len(looped_music) < voice_len:
-                looped_music += music
-            music = looped_music
-
-        # 2) If the music is longer than the voice, truncate it:
-        if len(music) > voice_len:
-            music = music[:voice_len]
-
-        # Now music and voice are the same length
         if ducking:
-            # Step 1: Reduce music dB while voice is playing
             ducked_music = music - duck_level
-            # Step 2: Overlay voice on top of ducked music
-            final_audio = ducked_music.overlay(voice)
         else:
-            # No ducking, just overlay
-            final_audio = music.overlay(voice)
-
-        output_path = os.path.join(tempfile.gettempdir(), "blended_output.wav")
-        final_audio.export(output_path, format="wav")
         return output_path
-
     except Exception as e:
-        return f"Error blending audio: {e}"
-
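In pydub, subtracting an integer from an AudioSegment applies a gain reduction in dB, so `music - duck_level` is the entire ducking step; `overlay` then mixes the voice on top. A self-contained sketch of that idiom, using tone generators as stand-ins for the real voice and music files:

```python
from pydub import AudioSegment
from pydub.generators import Sine

voice = Sine(440).to_audio_segment(duration=3000)   # 3 s stand-in for the voice-over
music = Sine(220).to_audio_segment(duration=5000)   # 5 s stand-in for the music bed

bed = music[:len(voice)] - 10        # trim to the voice length, duck by 10 dB
mix = bed.overlay(voice)             # voice sits on top of the quieter bed
mix.export("mix_demo.wav", format="wav")
```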
 
-# ---------------------------------------------------------------------
 # Gradio Interface
-# ---------------------------------------------------------------------
-with gr.Blocks() as demo:
-    gr.Markdown("""
-    # 🎧 AI Promo Studio
-    Welcome to **AI Promo Studio**, your all-in-one solution for creating professional, engaging audio promos with minimal effort!
-
-    This next-generation platform uses powerful AI models to handle:
-    - **Script Generation**: Craft concise and impactful copy with LLaMA.
-    - **Voice Synthesis**: Convert text into natural-sounding voice-overs using Coqui TTS.
-    - **Music Production**: Generate custom music tracks with MusicGen Large for the sound bed.
-    - **Seamless Blending**: Easily combine voice and music—loop or trim tracks to match your desired promo length, with optional ducking to keep the voice front and center.
-
-    Whether you're a radio producer, podcaster, or content creator, **AI Promo Studio** streamlines your entire production pipeline—cutting hours of manual editing down to a few clicks.
-    """)
-
 
     with gr.Tabs():
-        # Step 1: Generate Script
-        with gr.Tab("Step 1: Generate Script"):
             with gr.Row():
-                user_prompt = gr.Textbox(
-                    label="Promo Idea",
-                    placeholder="E.g., A 30-second promo for a morning show...",
-                    lines=2
-                )
-                llama_model_id = gr.Textbox(
-                    label="LLaMA Model ID",
-                    value="meta-llama/Meta-Llama-3-8B-Instruct",
-                    placeholder="Enter a valid Hugging Face model ID"
-                )
-                duration = gr.Slider(
-                    label="Desired Promo Duration (seconds)",
-                    minimum=15,
-                    maximum=60,
-                    step=15,
-                    value=30
-                )
-
-            generate_script_button = gr.Button("Generate Script")
-            script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5, interactive=False)
-            sound_design_output = gr.Textbox(label="Sound Design Suggestions", lines=3, interactive=False)
-            music_suggestion_output = gr.Textbox(label="Music Suggestions", lines=3, interactive=False)
-
-            generate_script_button.click(
-                fn=lambda user_prompt, model_id, dur: generate_script(user_prompt, model_id, HF_TOKEN, dur),
-                inputs=[user_prompt, llama_model_id, duration],
-                outputs=[script_output, sound_design_output, music_suggestion_output],
-            )
-
-        # Step 2: Generate Voice
-        with gr.Tab("Step 2: Generate Voice"):
-            gr.Markdown("Generate the voice-over using a Coqui TTS model.")
-            selected_tts_model = gr.Dropdown(
-                label="TTS Model",
-                choices=[
-                    "tts_models/en/ljspeech/tacotron2-DDC",
-                    "tts_models/en/ljspeech/vits",
-                    "tts_models/en/sam/tacotron-DDC",
-                ],
-                value="tts_models/en/ljspeech/tacotron2-DDC",
-                multiselect=False
-            )
-            generate_voice_button = gr.Button("Generate Voice-Over")
-            voice_audio_output = gr.Audio(label="Voice-Over (WAV)", type="filepath")
-
-            generate_voice_button.click(
-                fn=lambda script, tts_model: generate_voice(script, tts_model),
-                inputs=[script_output, selected_tts_model],
-                outputs=voice_audio_output,
-            )
-
-        # Step 3: Generate Music (MusicGen Large)
-        with gr.Tab("Step 3: Generate Music"):
-            gr.Markdown("Generate a music track with the **MusicGen Large** model.")
-            audio_length = gr.Slider(
-                label="Music Length (tokens)",
-                minimum=128,
-                maximum=1024,
-                step=64,
-                value=512,
-                info="Increase tokens for longer audio, but be mindful of inference time."
-            )
-            generate_music_button = gr.Button("Generate Music")
-            music_output = gr.Audio(label="Generated Music (WAV)", type="filepath")
-
-            generate_music_button.click(
-                fn=lambda music_suggestion, length: generate_music(music_suggestion, length),
-                inputs=[music_suggestion_output, audio_length],
-                outputs=[music_output],
             )
 
-        # Step 4: Blend Audio (Loop/Trim + Ducking)
-        with gr.Tab("Step 4: Blend Audio"):
-            gr.Markdown("**Music** will be looped or trimmed to match **Voice** duration, then optionally ducked.")
-            ducking_checkbox = gr.Checkbox(label="Enable Ducking?", value=True)
-            duck_level_slider = gr.Slider(
-                label="Ducking Level (dB attenuation)",
-                minimum=0,
-                maximum=20,
-                step=1,
-                value=10
-            )
-            blend_button = gr.Button("Blend Voice + Music")
-            blended_output = gr.Audio(label="Final Blended Output (WAV)", type="filepath")
-
-            blend_button.click(
-                fn=blend_audio,
-                inputs=[voice_audio_output, music_output, ducking_checkbox, duck_level_slider],
-                outputs=blended_output
-            )
 
-    # Footer
     gr.Markdown("""
     <hr>
     <p style="text-align: center; font-size: 0.9em;">
@@ -380,11 +327,11 @@ Whether you're a radio producer, podcaster, or content creator, **AI Promo Stu
     </p>
     """)
 
-    # Visitor Badge
     gr.HTML("""
     <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
        <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" />
    </a>
    """)
 
-demo.launch(debug=True)
 from dotenv import load_dotenv
 import tempfile
 import spaces
 from TTS.api import TTS
 
+# -------------------------------
+# Configuration
+# -------------------------------
 load_dotenv()
 HF_TOKEN = os.getenv("HF_TOKEN")
 
+MODEL_CONFIG = {
+    "llama_models": {
+        "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
+        "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
+        "Phi-3-mini": "microsoft/Phi-3-mini-4k-instruct"
+    },
+    "tts_models": {
+        "Standard English": "tts_models/en/ljspeech/tacotron2-DDC",
+        "High Quality": "tts_models/en/ljspeech/vits",
+        "Fast Inference": "tts_models/en/sam/tacotron-DDC"
+    }
+}
+
+# -------------------------------
+# Model Manager
+# -------------------------------
+class ModelManager:
+    def __init__(self):
+        self.llama_pipelines = {}
+        self.musicgen_models = {}
+        self.tts_models = {}
+
+    def get_llama_pipeline(self, model_id, token):
+        if model_id not in self.llama_pipelines:
+            tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
+            model = AutoModelForCausalLM.from_pretrained(
+                model_id,
+                token=token,
+                torch_dtype=torch.float16,
+                device_map="auto",
+                attn_implementation="flash_attention_2"
+            )
+            self.llama_pipelines[model_id] = pipeline(
+                "text-generation",
+                model=model,
+                tokenizer=tokenizer,
+                device_map="auto"
+            )
+        return self.llama_pipelines[model_id]
+
+    def get_musicgen_model(self, model_key="facebook/musicgen-large"):
+        if model_key not in self.musicgen_models:
+            model = MusicgenForConditionalGeneration.from_pretrained(model_key)
+            processor = AutoProcessor.from_pretrained(model_key)
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            model.to(device)
+            self.musicgen_models[model_key] = (model, processor)
+        return self.musicgen_models[model_key]
+
+    def get_tts_model(self, model_name):
+        if model_name not in self.tts_models:
+            self.tts_models[model_name] = TTS(model_name)
+        return self.tts_models[model_name]
+
+model_manager = ModelManager()
+
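One caveat with the new loader: `attn_implementation="flash_attention_2"` only works when the flash-attn package is installed and the GPU supports it; otherwise `from_pretrained` raises at load time. A defensive variant (an assumption on my part, not part of this commit) would fall back to the default attention, reusing the file's existing imports:

```python
try:
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        token=token,
        torch_dtype=torch.float16,
        device_map="auto",
        attn_implementation="flash_attention_2",
    )
except (ImportError, ValueError):
    # flash-attn missing or unsupported on this hardware: retry with defaults
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        token=token,
        torch_dtype=torch.float16,
        device_map="auto",
    )
```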
+# -------------------------------
+# Core Functions
+# -------------------------------
+@spaces.GPU(duration=120)
+def generate_script(user_prompt, model_id, duration, temperature=0.7, max_tokens=512):
     try:
+        text_pipeline = model_manager.get_llama_pipeline(model_id, HF_TOKEN)
+
+        system_prompt = f"""You are an AI audio production assistant. Create content for a {duration}-second promo:
+        1. Voice Script: [Clear, engaging narration]
+        2. Sound Design: [3-5 specific sound effects]
+        3. Music: [Genre, tempo, mood suggestions]
+
+        Keep sections concise and production-ready."""
+
+        messages = [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt}
+        ]
+
+        response = text_pipeline(
+            messages,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            do_sample=True,
+            top_p=0.95,
+            eos_token_id=text_pipeline.tokenizer.eos_token_id
         )
 
+        return parse_generated_content(response[0]['generated_text'][-1]['content'])
+
     except Exception as e:
+        return f"Error: {str(e)}", "", ""
+
+def parse_generated_content(text):
+    sections = {
+        "Voice Script": "",
+        "Sound Design": "",
+        "Music": ""
+    }
+    current_section = None
+
+    for line in text.split('\n'):
+        line = line.strip()
+        if "Voice Script:" in line:
+            current_section = "Voice Script"
+            line = line.replace("Voice Script:", "").strip()
+        elif "Sound Design:" in line:
+            current_section = "Sound Design"
+            line = line.replace("Sound Design:", "").strip()
+        elif "Music:" in line:
+            current_section = "Music"
+            line = line.replace("Music:", "").strip()
+
+        if current_section and line:
+            sections[current_section] += line + "\n"
+
+    return sections["Voice Script"].strip(), sections["Sound Design"].strip(), sections["Music"].strip()
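A quick worked example of the new line-based parser on a typical reply (the sample text is hypothetical; `parse_generated_content` is the function defined just above):

```python
sample = """Voice Script: Start your morning with us.
Sound Design: alarm clock, birdsong, coffee pour
Music: upbeat acoustic pop, 120 BPM"""

voice, sound, music = parse_generated_content(sample)
print(voice)  # -> "Start your morning with us."
print(sound)  # -> "alarm clock, birdsong, coffee pour"
print(music)  # -> "upbeat acoustic pop, 120 BPM"
```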
 
 @spaces.GPU(duration=100)
+def generate_voice(script, tts_model, speed=1.0):
     try:
         if not script.strip():
+            raise ValueError("Empty script")
+
+        tts = model_manager.get_tts_model(tts_model)
+        output_path = os.path.join(tempfile.gettempdir(), "enhanced_voice.wav")
+
+        tts.tts_to_file(
+            text=script,
+            file_path=output_path,
+            speed=speed
+        )
         return output_path
     except Exception as e:
+        return f"Error: {str(e)}"
 
+@spaces.GPU(duration=150)
+def generate_music(prompt, duration_sec=30, temperature=1.0, guidance_scale=3.0):
     try:
+        model, processor = model_manager.get_musicgen_model()
         device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        inputs = processor(
+            text=[prompt],
+            padding=True,
+            return_tensors="pt",
+        ).to(device)
+
+        audio_values = model.generate(
+            **inputs,
+            max_new_tokens=int(duration_sec * 50),
+            temperature=temperature,
+            guidance_scale=guidance_scale,
+            do_sample=True
+        )
 
+        output_path = os.path.join(tempfile.gettempdir(), "enhanced_music.wav")
+        write(output_path, 32000, audio_values[0, 0].cpu().numpy())
         return output_path
     except Exception as e:
+        return f"Error: {str(e)}"
 
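MusicGen's decoder generates roughly 50 frames per second of 32 kHz audio, so `max_new_tokens=int(duration_sec * 50)` converts the requested duration into a token budget, and the new `write(..., 32000, ...)` call matches the model's native sample rate. The arithmetic, spelled out:

```python
FRAME_RATE = 50          # MusicGen decoder frames per second of audio
SAMPLE_RATE = 32_000     # MusicGen's native output rate

def tokens_for(duration_sec: float) -> int:
    # Token budget needed for the requested number of seconds.
    return int(duration_sec * FRAME_RATE)

print(tokens_for(30))    # -> 1500 tokens for a 30-second bed
print(tokens_for(10))    # -> 500
```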
+def blend_audio(voice_path, music_path, ducking=True, duck_level=10, crossfade=500):
     try:
         voice = AudioSegment.from_wav(voice_path)
         music = AudioSegment.from_wav(music_path)
+
+        if len(music) < len(voice):
+            loops = (len(voice) // len(music)) + 1
+            music = music * loops
+
+        music = music[:len(voice)].fade_out(crossfade)
+
         if ducking:
             ducked_music = music - duck_level
+            mixed = ducked_music.overlay(voice.fade_in(crossfade))
         else:
+            mixed = music.overlay(voice)
+
+        output_path = os.path.join(tempfile.gettempdir(), "enhanced_mix.wav")
+        mixed.export(output_path, format="wav")
         return output_path
     except Exception as e:
+        return f"Error: {str(e)}"
 
 
+# -------------------------------
 # Gradio Interface
+# -------------------------------
+theme = gr.themes.Soft(
+    primary_hue="blue",
+    secondary_hue="teal",
+).set(
+    body_text_color_dark='#FFFFFF',
+    background_fill_primary_dark='#1F1F1F'
+)
 
+with gr.Blocks(theme=theme, title="AI Audio Studio Pro") as demo:
+    gr.Markdown("""
+    # 🎙️ AI Audio Studio Pro
+    *Next-generation audio production powered by AI*
+    """)
+
     with gr.Tabs():
+        with gr.Tab("🎯 Concept Development"):
             with gr.Row():
+                with gr.Column(scale=2):
+                    concept_input = gr.Textbox(
+                        label="Your Concept",
+                        placeholder="Describe your audio project...",
+                        lines=3,
+                        max_lines=6
+                    )
+                    with gr.Accordion("Advanced Settings", open=False):
+                        with gr.Row():
+                            model_selector = gr.Dropdown(
+                                choices=list(MODEL_CONFIG["llama_models"].values()),
+                                label="AI Model",
+                                value=MODEL_CONFIG["llama_models"]["Meta-Llama-3-8B"]
+                            )
+                            duration_slider = gr.Slider(15, 120, value=30, step=15, label="Duration (seconds)")
+                        with gr.Row():
+                            temp_slider = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Creativity")
+                            token_slider = gr.Slider(128, 1024, value=512, step=128, label="Max Length")
+
+                    generate_btn = gr.Button("✨ Generate Concept", variant="primary")
+
+                with gr.Column(scale=1):
+                    script_output = gr.Textbox(label="Voice Script", interactive=True)
+                    sound_output = gr.Textbox(label="Sound Design", interactive=True)
+                    music_output = gr.Textbox(label="Music Suggestions", interactive=True)
+
+            generate_btn.click(
+                generate_script,
+                inputs=[concept_input, model_selector, duration_slider, temp_slider, token_slider],
+                outputs=[script_output, sound_output, music_output]
             )
 
+        with gr.Tab("🗣️ Voice Production"):
+            with gr.Row():
+                with gr.Column():
+                    tts_model = gr.Dropdown(
+                        choices=list(MODEL_CONFIG["tts_models"].values()),
+                        label="Voice Model",
+                        value=MODEL_CONFIG["tts_models"]["Standard English"]
+                    )
+                    speed_slider = gr.Slider(0.5, 2.0, value=1.0, step=0.1, label="Speaking Rate")
+                    voice_btn = gr.Button("🎙️ Generate Voiceover", variant="primary")
+                with gr.Column():
+                    voice_preview = gr.Audio(label="Preview", interactive=False)
+            voice_btn.click(
+                generate_voice,
+                inputs=[script_output, tts_model, speed_slider],
+                outputs=voice_preview
+            )
+
+        with gr.Tab("🎶 Music Production"):
+            with gr.Row():
+                with gr.Column():
+                    with gr.Accordion("Music Parameters", open=True):
+                        music_duration = gr.Slider(10, 120, value=30, label="Duration (seconds)")
+                        music_temp = gr.Slider(0.1, 2.0, value=1.0, label="Creativity")
+                        guidance_scale = gr.Slider(1.0, 5.0, value=3.0, label="Focus")
+                    music_btn = gr.Button("🎵 Generate Music", variant="primary")
+                with gr.Column():
+                    music_preview = gr.Audio(label="Preview", interactive=False)
+            music_btn.click(
+                generate_music,
+                inputs=[music_output, music_duration, music_temp, guidance_scale],
+                outputs=music_preview
+            )
+
+        with gr.Tab("🔊 Final Mix"):
+            with gr.Row():
+                with gr.Column():
+                    ducking_toggle = gr.Checkbox(value=True, label="Enable Voice Ducking")
+                    duck_level = gr.Slider(0, 30, value=12, label="Ducking Strength (dB)")
+                    crossfade_time = gr.Slider(0, 2000, value=500, label="Crossfade (ms)")
+                    mix_btn = gr.Button("🚀 Create Final Mix", variant="primary")
+                with gr.Column():
+                    final_mix = gr.Audio(label="Master Output", interactive=False)
+            mix_btn.click(
+                blend_audio,
+                inputs=[voice_preview, music_preview, ducking_toggle, duck_level, crossfade_time],
+                outputs=final_mix
+            )
+
+    with gr.Accordion("📚 Example Prompts", open=False):
+        gr.Examples(
+            examples=[
+                ["A 30-second tech podcast intro with futuristic sounds"],
+                ["A 15-second radio ad for a coffee shop with morning vibes"],
+                ["A 60-second documentary trailer with epic orchestral music"]
+            ],
+            inputs=concept_input
+        )
+
+    with gr.Row():
+        gr.Markdown("### System Resources")
+        gpu_status = gr.Textbox(label="GPU Utilization", interactive=False)
+        ram_status = gr.Textbox(label="RAM Usage", interactive=False)
 
+    # Custom Footer
     gr.Markdown("""
     <hr>
     <p style="text-align: center; font-size: 0.9em;">
     </p>
     """)
 
     gr.HTML("""
     <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
        <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" />
    </a>
    """)
 
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)