# AIPromoStudio / app.py
import os
import re
import torch
import tempfile
import logging
import numpy as np
from scipy.io.wavfile import write
from pydub import AudioSegment
from dotenv import load_dotenv
import spaces
import gradio as gr
# Transformers & Models
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
pipeline,
AutoProcessor,
MusicgenForConditionalGeneration,
)
# Coqui TTS
from TTS.api import TTS
# ---------------------------------------------------------------------
# Setup Logging and Environment Variables
# ---------------------------------------------------------------------
logging.basicConfig(level=logging.INFO)
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
# ---------------------------------------------------------------------
# Global Model Caches
# ---------------------------------------------------------------------
LLAMA_PIPELINES = {}
MUSICGEN_MODELS = {}
TTS_MODELS = {}
# ---------------------------------------------------------------------
# Utility Function
# ---------------------------------------------------------------------
def clean_text(text: str) -> str:
"""
Removes undesired characters (e.g., asterisks) that might not be recognized by the model's vocabulary.
"""
return re.sub(r'\*', '', text)
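
# Example (illustrative):
#   clean_text("**Attention!** Tune in *now*.")  ->  "Attention! Tune in now."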
# ---------------------------------------------------------------------
# Model Helper Functions
# ---------------------------------------------------------------------
def get_llama_pipeline(model_id: str, token: str):
"""
Returns a cached LLaMA text-generation pipeline if available; otherwise, loads and caches it.
"""
if model_id in LLAMA_PIPELINES:
return LLAMA_PIPELINES[model_id]
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        token=token,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
LLAMA_PIPELINES[model_id] = text_pipeline
return text_pipeline
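
# Usage sketch (illustrative; assumes HF_TOKEN grants access to the gated
# meta-llama checkpoint):
#   pipe = get_llama_pipeline("meta-llama/Meta-Llama-3-8B-Instruct", HF_TOKEN)
#   pipe("Write a 15-second radio tagline.", max_new_tokens=50)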
def get_musicgen_model(model_key: str = "facebook/musicgen-large"):
"""
Returns a cached MusicGen model and processor if available; otherwise, loads and caches them.
Uses the 'large' variant for higher quality outputs.
"""
if model_key in MUSICGEN_MODELS:
return MUSICGEN_MODELS[model_key]
model = MusicgenForConditionalGeneration.from_pretrained(model_key)
processor = AutoProcessor.from_pretrained(model_key)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
MUSICGEN_MODELS[model_key] = (model, processor)
return model, processor
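
# Usage sketch (illustrative). MusicGen decodes roughly 50 audio tokens per
# second, so 256 new tokens yields about five seconds of audio:
#   model, processor = get_musicgen_model()
#   inputs = processor(text=["upbeat synth jingle"], padding=True, return_tensors="pt")
#   audio_values = model.generate(**inputs.to(model.device), max_new_tokens=256)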
def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
"""
Returns a cached TTS model if available; otherwise, loads and caches it.
"""
if model_name in TTS_MODELS:
return TTS_MODELS[model_name]
tts_model = TTS(model_name)
TTS_MODELS[model_name] = tts_model
return tts_model
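
# Usage sketch (illustrative):
#   tts = get_tts_model()
#   tts.tts_to_file(text="Hello, listeners!", file_path="/tmp/hello.wav")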
# ---------------------------------------------------------------------
# Script Generation Function
# ---------------------------------------------------------------------
@spaces.GPU(duration=100)
def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
"""
Generates a voice-over script, sound design suggestions, and music ideas from a user prompt.
Returns a tuple: (voice_script, sound_design, music_suggestions).
"""
try:
text_pipeline = get_llama_pipeline(model_id, token)
system_prompt = (
"You are an expert radio imaging producer specializing in sound design and music. "
f"Based on the user's concept and the selected duration of {duration} seconds, produce the following:\n"
"1. A concise voice-over script. Prefix this section with 'Voice-Over Script:'\n"
"2. Suggestions for sound design. Prefix this section with 'Sound Design Suggestions:'\n"
"3. Music styles or track recommendations. Prefix this section with 'Music Suggestions:'"
)
combined_prompt = f"{system_prompt}\nUser concept: {user_prompt}\nOutput:"
with torch.inference_mode():
result = text_pipeline(
combined_prompt,
max_new_tokens=300,
do_sample=True,
temperature=0.8
)
generated_text = result[0]["generated_text"]
if "Output:" in generated_text:
generated_text = generated_text.split("Output:")[-1].strip()
pattern = r"Voice-Over Script:\s*(.*?)\s*Sound Design Suggestions:\s*(.*?)\s*Music Suggestions:\s*(.*)"
match = re.search(pattern, generated_text, re.DOTALL)
if match:
voice_script, sound_design, music_suggestions = (grp.strip() for grp in match.groups())
else:
voice_script = "No voice-over script found."
sound_design = "No sound design suggestions found."
music_suggestions = "No music suggestions found."
return voice_script, sound_design, music_suggestions
except Exception as e:
logging.exception("Error generating script")
return f"Error generating script: {e}", "", ""
# ---------------------------------------------------------------------
# Voice-Over Generation Function
# ---------------------------------------------------------------------
@spaces.GPU(duration=100)
def generate_voice(script: str, tts_model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
"""
Generates a voice-over audio file from the provided script using Coqui TTS.
    Returns the file path to the generated .wav file; raises gr.Error on failure.
"""
    try:
        if not script.strip():
            raise gr.Error("No script provided.")
        cleaned_script = clean_text(script)
        tts_model = get_tts_model(tts_model_name)
        output_path = os.path.join(tempfile.gettempdir(), "voice_over.wav")
        tts_model.tts_to_file(text=cleaned_script, file_path=output_path)
        return output_path
    except gr.Error:
        raise
    except Exception as e:
        logging.exception("Error generating voice")
        raise gr.Error(f"Error generating voice: {e}")
# ---------------------------------------------------------------------
# Music Generation Function
# ---------------------------------------------------------------------
@spaces.GPU(duration=200)
def generate_music(prompt: str, audio_length: int):
"""
    Generates a music track with the 'facebook/musicgen-large' model from the prompt.
    Returns the file path to the generated .wav file; raises gr.Error on failure.
"""
    try:
        if not prompt.strip():
            raise gr.Error("No music suggestion provided.")
        model_key = "facebook/musicgen-large"
        musicgen_model, musicgen_processor = get_musicgen_model(model_key)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
        with torch.inference_mode():
            # Gradio sliders may deliver floats; generate expects an int token count.
            outputs = musicgen_model.generate(**inputs, max_new_tokens=int(audio_length))
        audio_data = outputs[0, 0].cpu().numpy()
        # Normalize to int16 PCM, guarding against division by zero on silent output.
        peak = float(np.max(np.abs(audio_data))) or 1.0
        normalized_audio = (audio_data / peak * 32767).astype("int16")
        # MusicGen's audio encoder runs at 32 kHz; writing at a hardcoded 44.1 kHz
        # would make the track play back too fast.
        sampling_rate = musicgen_model.config.audio_encoder.sampling_rate
        output_path = os.path.join(tempfile.gettempdir(), "musicgen_large_generated_music.wav")
        write(output_path, sampling_rate, normalized_audio)
        return output_path
    except gr.Error:
        raise
    except Exception as e:
        logging.exception("Error generating music")
        raise gr.Error(f"Error generating music: {e}")
# ---------------------------------------------------------------------
# Audio Blending with Duration Sync & Ducking
# ---------------------------------------------------------------------
# Note: audio blending with pydub runs on the CPU, so no GPU reservation is needed.
def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
"""
Blends two audio files (voice and music).
- Loops music if shorter than voice.
- Trims music if longer than voice.
- Applies ducking to lower music volume during voice segments if enabled.
    Returns the file path to the blended .wav file; raises gr.Error on failure.
"""
    try:
        if not os.path.isfile(voice_path) or not os.path.isfile(music_path):
            raise gr.Error("Missing audio files for blending.")
        voice = AudioSegment.from_wav(voice_path)
        music = AudioSegment.from_wav(music_path)
        voice_len = len(voice)
        # Loop the music until it covers the voice, then trim to an exact match.
        if len(music) < voice_len:
            looped_music = AudioSegment.empty()
            while len(looped_music) < voice_len:
                looped_music += music
            music = looped_music
        music = music[:voice_len]
        if ducking:
            ducked_music = music - duck_level
            final_audio = ducked_music.overlay(voice)
        else:
            final_audio = music.overlay(voice)
        output_path = os.path.join(tempfile.gettempdir(), "blended_output.wav")
        final_audio.export(output_path, format="wav")
        return output_path
    except gr.Error:
        raise
    except Exception as e:
        logging.exception("Error blending audio")
        raise gr.Error(f"Error blending audio: {e}")
# ---------------------------------------------------------------------
# Gradio Interface
# ---------------------------------------------------------------------
with gr.Blocks(css="""
/* Global Styles */
body {
background: linear-gradient(135deg, #1d1f21, #3a3d41);
color: #f0f0f0;
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.header {
text-align: center;
padding: 2rem 1rem;
background: linear-gradient(90deg, #6a11cb, #2575fc);
border-radius: 0 0 20px 20px;
margin-bottom: 2rem;
}
.header h1 {
margin: 0;
font-size: 2.5rem;
}
.header p {
font-size: 1.2rem;
}
.gradio-container {
background: #2e2e2e;
border-radius: 10px;
padding: 1rem;
}
.tab-title {
font-size: 1.1rem;
font-weight: bold;
}
.footer {
text-align: center;
font-size: 0.9em;
margin-top: 2rem;
padding: 1rem;
color: #cccccc;
}
""") as demo:
# Custom Header
with gr.Row(elem_classes="header"):
gr.Markdown("""
<h1>🎧 Ai Ads Promo</h1>
<p>Your all-in-one AI solution for creating professional audio ads.</p>
""")
gr.Markdown("""
**Welcome to Ai Ads Promo!**
This simple, easy-to-use app helps you create amazing audio ads in just a few steps. Here’s how it works:
1. **Script Generation:**
- Share your idea and let our AI craft a clear and engaging voice-over script.
2. **Voice Synthesis:**
- Convert your script into a natural-sounding voice-over using advanced text-to-speech technology.
3. **Music Production:**
- Generate a custom music track that perfectly fits your ad.
4. **Audio Blending:**
- Combine your voice-over and music seamlessly. You can even adjust the music volume (ducking) when the voice plays.
**Benefits:**
- **Easy to Use:** Designed for everyone – no technical skills required.
- **Fast Results:** Quickly produce professional-sounding audio ads.
- **All-In-One:** Everything you need in one convenient app.
Get started now and create your perfect audio ad with Ai Ads Promo!
""")
with gr.Tabs():
# Step 1: Script Generation
with gr.Tab("📝 Script Generation"):
with gr.Row():
user_prompt = gr.Textbox(
label="Promo Idea",
placeholder="E.g., A 30-second ad for a morning show...",
lines=2
)
with gr.Row():
llama_model_id = gr.Textbox(
label="LLaMA Model ID",
value="meta-llama/Meta-Llama-3-8B-Instruct",
placeholder="Enter a valid Hugging Face model ID"
)
duration = gr.Slider(
label="Desired Ad Duration (seconds)",
minimum=15,
maximum=60,
step=15,
value=30
)
generate_script_button = gr.Button("Generate Script", variant="primary")
script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5, interactive=False)
sound_design_output = gr.Textbox(label="Sound Design Suggestions", lines=3, interactive=False)
music_suggestion_output = gr.Textbox(label="Music Suggestions", lines=3, interactive=False)
generate_script_button.click(
fn=lambda prompt, model_id, dur: generate_script(prompt, model_id, HF_TOKEN, dur),
inputs=[user_prompt, llama_model_id, duration],
outputs=[script_output, sound_design_output, music_suggestion_output],
)
# Step 2: Voice Synthesis
with gr.Tab("🎤 Voice Synthesis"):
gr.Markdown("Generate a natural-sounding voice-over using Coqui TTS.")
selected_tts_model = gr.Dropdown(
label="TTS Model",
choices=[
"tts_models/en/ljspeech/tacotron2-DDC",
"tts_models/en/ljspeech/vits",
"tts_models/en/sam/tacotron-DDC",
],
value="tts_models/en/ljspeech/tacotron2-DDC",
multiselect=False
)
generate_voice_button = gr.Button("Generate Voice-Over", variant="primary")
voice_audio_output = gr.Audio(label="Voice-Over (WAV)", type="filepath")
            generate_voice_button.click(
                fn=generate_voice,
                inputs=[script_output, selected_tts_model],
                outputs=voice_audio_output,
            )
# Step 3: Music Production
with gr.Tab("🎶 Music Production"):
gr.Markdown("Generate a custom music track using the **MusicGen Large** model.")
audio_length = gr.Slider(
label="Music Length (tokens)",
minimum=128,
maximum=1024,
step=64,
value=512,
info="Increase tokens for longer audio (inference time may vary)."
)
generate_music_button = gr.Button("Generate Music", variant="primary")
music_output = gr.Audio(label="Generated Music (WAV)", type="filepath")
            generate_music_button.click(
                fn=generate_music,
                inputs=[music_suggestion_output, audio_length],
                outputs=[music_output],
            )
# Step 4: Audio Blending
with gr.Tab("🎚️ Audio Blending"):
gr.Markdown("Blend your voice-over and music track. Music will be looped or trimmed to match your voice duration. Enable ducking to lower the music while the voice plays.")
ducking_checkbox = gr.Checkbox(label="Enable Ducking?", value=True)
duck_level_slider = gr.Slider(
label="Ducking Level (dB attenuation)",
minimum=0,
maximum=20,
step=1,
value=10
)
blend_button = gr.Button("Blend Voice + Music", variant="primary")
blended_output = gr.Audio(label="Final Blended Output (WAV)", type="filepath")
blend_button.click(
fn=blend_audio,
inputs=[voice_audio_output, music_output, ducking_checkbox, duck_level_slider],
outputs=blended_output
)
# Footer
gr.Markdown("""
<div class="footer">
<hr>
Created with ❤️ by <a href="https://bilsimaging.com" target="_blank" style="color: #88aaff;">bilsimaging.com</a>
<br>
<small>Ai Ads Promo &copy; 2025</small>
</div>
""")
# Visitor Badge
gr.HTML("""
<div style="text-align: center; margin-top: 1rem;">
<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
<img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" alt="visitor badge"/>
</a>
</div>
""")
demo.launch(debug=True)