Bils committed (verified)
Commit 217c4b5 · Parent: 9b010cf

Update app.py

Files changed (1): app.py (+21 -42)
app.py CHANGED
@@ -8,7 +8,6 @@ from transformers import (
     AutoProcessor,
     MusicgenForConditionalGeneration,
 )
-from diffusers import StableDiffusionPipeline
 from scipy.io.wavfile import write
 from pydub import AudioSegment
 from dotenv import load_dotenv
@@ -25,6 +24,7 @@ hf_token = os.getenv("HF_TOKEN")
 # Script Generation Function
 # ---------------------------------------------------------------------
 @spaces.GPU(duration=300)
+@spaces.GPU(duration=300)
 def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
     try:
         tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
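
This hunk stacks a second `@spaces.GPU(duration=300)` decorator on top of the one already present; a single decorator is normally sufficient to request a ZeroGPU slot. The tokenizer is also loaded with `use_auth_token=`, which newer transformers releases deprecate in favor of `token=`. A minimal sketch under those assumptions (loading the script model with `AutoModelForCausalLM` is illustrative, since the rest of `generate_script` is not shown in this diff):

```python
# Minimal sketch, not part of the commit: one @spaces.GPU decorator and the newer
# token= argument. model_id and hf_token are assumed to exist as in app.py; using
# AutoModelForCausalLM for the script model is an assumption, since the body of
# generate_script is not visible in this diff.
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

@spaces.GPU(duration=300)
def load_script_model(model_id: str, hf_token: str):
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token)
    return tokenizer, model
```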
@@ -87,36 +87,24 @@ def generate_voice(script: str, speaker: str = "default"):
 # Music Generation Function
 # ---------------------------------------------------------------------
 @spaces.GPU(duration=300)
-def generate_music(prompt: str, audio_length: int, model_choice: str):
+def generate_music(prompt: str, audio_length: int):
     try:
-        if model_choice == "Stable Audio Open 1.0":
-            stable_pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-audio-open-1.0")
-            stable_pipeline.to("cuda" if torch.cuda.is_available() else "cpu")
-
-            audio = stable_pipeline(prompt, num_inference_steps=50, guidance_scale=7.5)
-            output_path = f"{tempfile.gettempdir()}/stable_generated_music.wav"
-            write(output_path, 44100, audio["sample"].cpu().numpy())
-            return output_path
+        musicgen_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+        musicgen_processor = AutoProcessor.from_pretrained("facebook/musicgen-small")

-        elif model_choice == "MusicGen":
-            musicgen_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
-            musicgen_processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        musicgen_model.to(device)

-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            musicgen_model.to(device)
+        inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
+        outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)

-            inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
-            outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)
+        audio_data = outputs[0, 0].cpu().numpy()
+        normalized_audio = (audio_data / max(abs(audio_data)) * 32767).astype("int16")

-            audio_data = outputs[0, 0].cpu().numpy()
-            normalized_audio = (audio_data / max(abs(audio_data)) * 32767).astype("int16")
+        output_path = f"{tempfile.gettempdir()}/generated_music.wav"
+        write(output_path, 44100, normalized_audio)

-            output_path = f"{tempfile.gettempdir()}/musicgen_generated_music.wav"
-            write(output_path, 44100, normalized_audio)
-            return output_path
-
-        else:
-            return "Invalid model choice!"
+        return output_path
     except Exception as e:
         return f"Error generating music: {e}"
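
With the Stable Audio branch gone, `generate_music` is MusicGen-only, which also explains the removal of the `diffusers` import in the first hunk. One detail the rewrite keeps is the hard-coded 44100 Hz passed to `write`, while `facebook/musicgen-small` decodes audio at 32 kHz. A hedged sketch (the function name and the epsilon guard are illustrative, not from the commit) that reads the rate from the model config instead:

```python
# Sketch only: read the output sampling rate from the MusicGen config (32 kHz for
# facebook/musicgen-small) instead of hard-coding 44100, which would otherwise
# shift the pitch/speed of the written WAV.
import tempfile
import torch
from scipy.io.wavfile import write
from transformers import AutoProcessor, MusicgenForConditionalGeneration

def generate_music_sketch(prompt: str, audio_length: int) -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to(device)
    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")

    inputs = processor(text=[prompt], padding=True, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=audio_length)

    sampling_rate = model.config.audio_encoder.sampling_rate  # 32000 for musicgen-small
    audio = outputs[0, 0].cpu().numpy()
    audio_int16 = (audio / max(abs(audio).max(), 1e-9) * 32767).astype("int16")  # peak-normalize to int16

    output_path = f"{tempfile.gettempdir()}/musicgen_sketch.wav"
    write(output_path, sampling_rate, audio_int16)
    return output_path
```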
 
@@ -144,12 +132,11 @@ def blend_audio(voice_path: str, music_path: str, ducking: bool):
 # ---------------------------------------------------------------------
 with gr.Blocks() as demo:
     gr.Markdown("""
-    # 🎵 AI Promo Studio 🚀
-    Generate scripts, sound design, and music suggestions with ease.
+    # 🎧 AI Promo Studio with Step-by-Step Script, Voice, Music, and Mixing 🚀
+    Generate and mix radio promos effortlessly with AI tools!
     """)

     with gr.Tabs():
-        # Step 1: Generate Script
         with gr.Tab("Step 1: Generate Script"):
             with gr.Row():
                 user_prompt = gr.Textbox(label="Promo Idea", placeholder="E.g., A 30-second promo for a morning show.")
@@ -157,17 +144,15 @@ with gr.Blocks() as demo:
                 duration = gr.Slider(label="Duration (seconds)", minimum=15, maximum=60, step=15, value=30)

             generate_script_button = gr.Button("Generate Script")
-            script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5)
-            sound_design_output = gr.Textbox(label="Sound Design Ideas", lines=3)
-            music_suggestion_output = gr.Textbox(label="Music Suggestions", lines=3)
+            script_output = gr.Textbox(label="Generated Script")
+            music_suggestion_output = gr.Textbox(label="Music Suggestion")

             generate_script_button.click(
                 fn=lambda user_prompt, model_id, duration: generate_script(user_prompt, model_id, hf_token, duration),
                 inputs=[user_prompt, llama_model_id, duration],
-                outputs=[script_output, sound_design_output, music_suggestion_output],
+                outputs=[script_output, music_suggestion_output],
             )

-        # Step 2: Generate Voice
         with gr.Tab("Step 2: Generate Voice"):
             with gr.Row():
                 speaker = gr.Textbox(label="Voice Style (optional)", placeholder="E.g., male, female, or neutral.")
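
After this hunk the script tab exposes two output boxes instead of three, so the click handler is expected to return exactly two values, which Gradio maps positionally onto `outputs`. An equivalent wiring with a named wrapper instead of the lambda (the wrapper name and the two-value return shape are assumptions, since `generate_script`'s body is not shown here):

```python
# Sketch only: a named wrapper in place of the lambda. generate_script is assumed
# to return a (script_text, music_suggestion) pair after this commit, matching the
# two output components.
def run_generate_script(prompt: str, model_id: str, duration: int):
    return generate_script(prompt, model_id, hf_token, duration)

generate_script_button.click(
    fn=run_generate_script,
    inputs=[user_prompt, llama_model_id, duration],
    outputs=[script_output, music_suggestion_output],
)
```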
@@ -181,22 +166,16 @@ with gr.Blocks() as demo:
                 outputs=[voice_output],
             )

-        # Step 3: Generate Music
         with gr.Tab("Step 3: Generate Music"):
             with gr.Row():
                 audio_length = gr.Slider(label="Music Length (tokens)", minimum=128, maximum=1024, step=64, value=512)
-                model_choice = gr.Dropdown(
-                    label="Select Music Generation Model",
-                    choices=["Stable Audio Open 1.0", "MusicGen"],
-                    value="Stable Audio Open 1.0"
-                )

             generate_music_button = gr.Button("Generate Music")
             music_output = gr.Audio(label="Generated Music", type="filepath")

             generate_music_button.click(
-                fn=lambda music_suggestion, audio_length, model_choice: generate_music(music_suggestion, audio_length, model_choice),
-                inputs=[music_suggestion_output, audio_length, model_choice],
+                fn=lambda music_suggestion, audio_length: generate_music(music_suggestion, audio_length),
+                inputs=[music_suggestion_output, audio_length],
                 outputs=[music_output],
             )
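
The music tab now feeds the generated music suggestion straight into the single-model `generate_music`, with the length still expressed in tokens. MusicGen produces roughly 50 audio tokens per second (about 5 seconds from 256 tokens), so the slider's 128-1024 range maps to roughly 2.5-20 seconds of audio; a hypothetical helper like the one below could expose the control in seconds instead:

```python
# Hypothetical helper, not in app.py: convert a duration in seconds into a MusicGen
# max_new_tokens value, assuming roughly 50 audio tokens per second of output.
TOKENS_PER_SECOND = 50

def seconds_to_tokens(seconds: float, minimum: int = 128, maximum: int = 1024) -> int:
    tokens = int(round(seconds * TOKENS_PER_SECOND))
    return max(minimum, min(maximum, tokens))

# Example: a 10-second music bed -> 500 tokens, within the existing slider range.
```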
 
@@ -220,4 +199,4 @@ with gr.Blocks() as demo:
     </p>
     """)

-demo.launch(debug=True)
+demo.launch(debug=True)
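
The final hunk rewrites `demo.launch(debug=True)` without changing its visible content, so this is presumably a whitespace- or line-ending-only change. For a Space whose handlers are GPU-decorated and long-running, a common option is to enable Gradio's request queue before launching; a minimal sketch of that variant (an option, not what the commit does):

```python
# Sketch only: queue requests so long-running GPU calls (script, voice, music) are
# processed in turn instead of all at once; debug=True is mainly useful locally.
demo.queue().launch(debug=True)
```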
 