Bils committed on
Commit
1808e7a
Β·
verified Β·
1 Parent(s): fd8d42a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -15
app.py CHANGED
@@ -8,6 +8,7 @@ from transformers import (
8
  AutoProcessor,
9
  MusicgenForConditionalGeneration,
10
  )
 
11
  from scipy.io.wavfile import write
12
  from pydub import AudioSegment
13
  from dotenv import load_dotenv
@@ -86,24 +87,36 @@ def generate_voice(script: str, speaker: str = "default"):
86
  # Music Generation Function
87
  # ---------------------------------------------------------------------
88
  @spaces.GPU(duration=300)
89
- def generate_music(prompt: str, audio_length: int):
90
  try:
91
- musicgen_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
92
- musicgen_processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
 
93
 
94
- device = "cuda" if torch.cuda.is_available() else "cpu"
95
- musicgen_model.to(device)
 
 
96
 
97
- inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
98
- outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)
 
99
 
100
- audio_data = outputs[0, 0].cpu().numpy()
101
- normalized_audio = (audio_data / max(abs(audio_data)) * 32767).astype("int16")
102
 
103
- output_path = f"{tempfile.gettempdir()}/generated_music.wav"
104
- write(output_path, 44100, normalized_audio)
105
 
106
- return output_path
 
 
 
 
 
 
 
 
107
  except Exception as e:
108
  return f"Error generating music: {e}"
109
 
@@ -131,7 +144,7 @@ def blend_audio(voice_path: str, music_path: str, ducking: bool):
131
  # ---------------------------------------------------------------------
132
  with gr.Blocks() as demo:
133
  gr.Markdown("""
134
- # 🎧 AI Promo Studio πŸš€
135
  Generate scripts, sound design, and music suggestions with ease.
136
  """)
137
 
@@ -154,6 +167,7 @@ with gr.Blocks() as demo:
154
  outputs=[script_output, sound_design_output, music_suggestion_output],
155
  )
156
 
 
157
  with gr.Tab("Step 2: Generate Voice"):
158
  with gr.Row():
159
  speaker = gr.Textbox(label="Voice Style (optional)", placeholder="E.g., male, female, or neutral.")
@@ -167,16 +181,22 @@ with gr.Blocks() as demo:
167
  outputs=[voice_output],
168
  )
169
 
 
170
  with gr.Tab("Step 3: Generate Music"):
171
  with gr.Row():
172
  audio_length = gr.Slider(label="Music Length (tokens)", minimum=128, maximum=1024, step=64, value=512)
 
 
 
 
 
173
 
174
  generate_music_button = gr.Button("Generate Music")
175
  music_output = gr.Audio(label="Generated Music", type="filepath")
176
 
177
  generate_music_button.click(
178
- fn=lambda music_suggestion, audio_length: generate_music(music_suggestion, audio_length),
179
- inputs=[music_suggestion_output, audio_length],
180
  outputs=[music_output],
181
  )
182
 
 
8
  AutoProcessor,
9
  MusicgenForConditionalGeneration,
10
  )
11
+ from diffusers import StableDiffusionPipeline
12
  from scipy.io.wavfile import write
13
  from pydub import AudioSegment
14
  from dotenv import load_dotenv
 
87
  # Music Generation Function
88
  # ---------------------------------------------------------------------
89
@spaces.GPU(duration=300)
def generate_music(prompt: str, audio_length: int, model_choice: str):
    """Generate a short music clip from a text prompt and save it as a WAV file.

    Args:
        prompt: Text description of the desired music.
        audio_length: Generation length in model tokens (used by MusicGen only;
            the diffusion model runs with a fixed step count here).
        model_choice: Either "Stable Audio Open 1.0" or "MusicGen".

    Returns:
        Filesystem path of the generated WAV file on success, or an error
        string on failure (the Gradio UI displays whatever is returned).
    """
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"

        if model_choice == "Stable Audio Open 1.0":
            # Stable Audio Open is an audio *diffusion* model. It must be loaded
            # with StableAudioPipeline — StableDiffusionPipeline is an image
            # pipeline and cannot load this checkpoint.
            from diffusers import StableAudioPipeline

            pipeline = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0")
            pipeline.to(device)

            result = pipeline(prompt, num_inference_steps=50, guidance_scale=7.5)
            # The pipeline returns `.audios` as (channels, samples) floats in
            # [-1, 1]; scipy.io.wavfile.write expects (samples, channels).
            audio_data = result.audios[0].T.float().cpu().numpy()
            # Use the checkpoint's own sampling rate (44.1 kHz for this model)
            # instead of hard-coding it.
            sample_rate = pipeline.vae.sampling_rate

            output_path = f"{tempfile.gettempdir()}/stable_generated_music.wav"
            write(output_path, sample_rate, _to_int16_pcm(audio_data))
            return output_path

        elif model_choice == "MusicGen":
            musicgen_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
            musicgen_processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
            musicgen_model.to(device)

            inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
            outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)

            audio_data = outputs[0, 0].cpu().numpy()
            # musicgen-small decodes at 32 kHz. Writing it at a hard-coded
            # 44100 Hz (as before) would play the clip pitched up and too fast.
            sample_rate = musicgen_model.config.audio_encoder.sampling_rate

            output_path = f"{tempfile.gettempdir()}/musicgen_generated_music.wav"
            write(output_path, sample_rate, _to_int16_pcm(audio_data))
            return output_path

        else:
            return "Invalid model choice!"
    except Exception as e:
        # Report rather than raise: the UI renders the returned string.
        return f"Error generating music: {e}"


def _to_int16_pcm(audio_data):
    """Scale float PCM to int16, guarding against divide-by-zero on silence."""
    peak = float(abs(audio_data).max())
    if peak == 0.0:
        # All-silent output: avoid 0/0 and just emit zeros.
        return audio_data.astype("int16")
    return (audio_data / peak * 32767).astype("int16")
122
 
 
144
  # ---------------------------------------------------------------------
145
  with gr.Blocks() as demo:
146
  gr.Markdown("""
147
+ # 🎡 AI Promo Studio πŸš€
148
  Generate scripts, sound design, and music suggestions with ease.
149
  """)
150
 
 
167
  outputs=[script_output, sound_design_output, music_suggestion_output],
168
  )
169
 
170
+ # Step 2: Generate Voice
171
  with gr.Tab("Step 2: Generate Voice"):
172
  with gr.Row():
173
  speaker = gr.Textbox(label="Voice Style (optional)", placeholder="E.g., male, female, or neutral.")
 
181
  outputs=[voice_output],
182
  )
183
 
184
+ # Step 3: Generate Music
185
  with gr.Tab("Step 3: Generate Music"):
186
  with gr.Row():
187
  audio_length = gr.Slider(label="Music Length (tokens)", minimum=128, maximum=1024, step=64, value=512)
188
+ model_choice = gr.Dropdown(
189
+ label="Select Music Generation Model",
190
+ choices=["Stable Audio Open 1.0", "MusicGen"],
191
+ value="Stable Audio Open 1.0"
192
+ )
193
 
194
  generate_music_button = gr.Button("Generate Music")
195
  music_output = gr.Audio(label="Generated Music", type="filepath")
196
 
197
  generate_music_button.click(
198
+ fn=lambda music_suggestion, audio_length, model_choice: generate_music(music_suggestion, audio_length, model_choice),
199
+ inputs=[music_suggestion_output, audio_length, model_choice],
200
  outputs=[music_output],
201
  )
202