Bils committed (verified)
Commit c243adb · 1 Parent(s): ce7b644

Update app.py

Files changed (1)
  1. app.py +175 -203
app.py CHANGED
@@ -1,8 +1,7 @@
 import streamlit as st
 import torch
 import scipy.io.wavfile
-import requests
-from io import BytesIO
 from transformers import (
     AutoTokenizer,
     AutoModelForCausalLM,
@@ -10,200 +9,243 @@ from transformers import (
     AutoProcessor,
     MusicgenForConditionalGeneration
 )
 from streamlit_lottie import st_lottie  # pip install streamlit-lottie
 
 # ---------------------------------------------------------------------
-# 1) Page Configuration
 # ---------------------------------------------------------------------
 st.set_page_config(
-    page_title="Modern Radio Imaging Generator - Llama 3 & MusicGen",
     page_icon="🎧",
     layout="wide"
 )
 
 # ---------------------------------------------------------------------
-# 2) Custom CSS for a Sleek, Modern Look
 # ---------------------------------------------------------------------
-MODERN_CSS = """
 <style>
-/* Body styling */
 body {
-    background: linear-gradient(to bottom right, #ffffff, #f3f4f6);
-    font-family: 'Helvetica Neue', Arial, sans-serif;
-    color: #1F2937;
 }
 
-/* Make the container narrower for a sleek look */
 .block-container {
     max-width: 1100px;
 }
 
-/* Heading style */
-h1, h2, h3, h4, h5, h6 {
-    color: #3B82F6;
-    margin-bottom: 0.5em;
 }
 
-/* Buttons */
 .stButton>button {
-    background-color: #3B82F6 !important;
     color: #FFFFFF !important;
-    border-radius: 0.8rem !important;
-    font-size: 1rem !important;
     padding: 0.6rem 1.2rem !important;
 }
 
-/* Sidebar customization */
 .sidebar .sidebar-content {
-    background: #E0F2FE;
 }
 
-/* Text input areas */
 textarea, input, select {
-    border-radius: 0.5rem !important;
 }
 
-/* Animate some elements on hover (just an example) */
-.stButton>button:hover {
-    background-color: #2563EB !important;
-    transition: background-color 0.3s ease-in-out;
 }
 
-/* Lottie container style */
 .lottie-container {
     display: flex;
     justify-content: center;
-    margin: 1rem 0;
 }
 
-/* Footer note */
 .footer-note {
     text-align: center;
-    opacity: 0.7;
     font-size: 14px;
     margin-top: 2rem;
 }
 
-/* Hide default Streamlit branding if desired */
 #MainMenu, footer {visibility: hidden;}
 </style>
 """
-st.markdown(MODERN_CSS, unsafe_allow_html=True)
 
 # ---------------------------------------------------------------------
-# 3) Lottie Animation Loader
 # ---------------------------------------------------------------------
 @st.cache_data
 def load_lottie_url(url: str):
     """
-    Loads a Lottie animation JSON from a given URL.
     """
     r = requests.get(url)
     if r.status_code != 200:
         return None
     return r.json()
 
-# Example Lottie animations (feel free to replace with your own):
-LOTTIE_URL_HEADER = "https://assets1.lottiefiles.com/packages/lf20_amhnytsm.json"  # music-themed animation
-lottie_music = load_lottie_url(LOTTIE_URL_HEADER)
 
 # ---------------------------------------------------------------------
-# 4) Header & Intro with a Lottie Animation
 # ---------------------------------------------------------------------
-col_header1, col_header2 = st.columns([3, 2], gap="medium")
 
-with col_header1:
     st.markdown(
         """
-        <h1>🎙 Radio Imaging Generator (Beta)</h1>
-        <p style='font-size:18px;'>
-        Create catchy radio promos, ads, and station jingles with
-        a modern UI, Llama 3 text generation, and MusicGen audio!
-        </p>
-        """,
-        unsafe_allow_html=True
     )
-with col_header2:
-    if lottie_music:
         with st.container():
-            st_lottie(lottie_music, height=180, key="header_lottie")
     else:
-        # Fallback if Lottie fails to load
-        st.markdown("*(Animation unavailable)*")
 
 st.markdown("---")
 
 # ---------------------------------------------------------------------
-# 5) Explanation in an Expander
 # ---------------------------------------------------------------------
-with st.expander("📘 How to Use This App"):
-    st.markdown(
-        """
-        **Steps**:
-        1. **Model & Language**: In the sidebar, choose the Llama model ID (e.g. a real Llama 2) and the device.
-        2. **Enter Concept**: Provide a short description of the ad or jingle you want.
-        3. **Refine**: Click on "Refine with Llama 3" to get a polished script in your chosen language or style.
-        4. **Generate Audio**: Use MusicGen to create a short audio snippet from that refined script.
-        5. **Listen & Download**: Enjoy or download the result as a WAV file.
 
-        **Note**:
-        - If "Llama 3.3" doesn't exist, you'll get errors. Use a real model from [Hugging Face](https://huggingface.co/models)
-          like `meta-llama/Llama-2-7b-chat-hf`.
-        - Some large models require GPU (or specialized hardware) for feasible speeds.
-        - This example uses [streamlit-lottie](https://github.com/andfanilo/streamlit-lottie) for animation.
-        """
-    )
 
-# ---------------------------------------------------------------------
-# 6) Sidebar Configuration
-# ---------------------------------------------------------------------
-with st.sidebar:
-    st.header("🔧 Llama 3 & Audio Settings")
-
-    # Model input
     llama_model_id = st.text_input(
-        "Llama Model ID",
-        value="meta-llama/Llama-3.3-70B-Instruct",  # Fictitious, please replace with a real model
-        help="Replace with a real model, e.g. meta-llama/Llama-2-7b-chat-hf"
     )
-
     device_option = st.selectbox(
-        "Hardware Device",
         ["auto", "cpu"],
-        index=0,
-        help="If local GPU is available, choose 'auto'. CPU might be slow for large models."
     )
-
-    # Multi-language or style
-    language_choice = st.selectbox(
-        "Choose Language",
-        ["English", "Spanish", "French", "German", "Other (describe in prompt)"]
-    )
-
-    # Music style & max tokens
-    music_style = st.selectbox(
-        "Preferred Music Style",
-        ["Pop", "Rock", "Electronic", "Classical", "Hip-Hop", "Reggae", "Ambient", "Other"]
-    )
-    audio_tokens = st.slider("MusicGen Max Tokens (Track Length)", 128, 1024, 512, 64)
 
 # ---------------------------------------------------------------------
-# 7) Prompt for the Radio Imaging Concept
 # ---------------------------------------------------------------------
-st.markdown("## ✍️ Your Radio Concept")
-prompt = st.text_area(
-    "Describe the theme, audience, length, energy level, etc.",
-    placeholder="E.g. 'A high-energy 10-second pop jingle for a morning radio show...'"
-)
 
 # ---------------------------------------------------------------------
-# 8) Load Llama Pipeline
 # ---------------------------------------------------------------------
 @st.cache_resource
 def load_llama_pipeline(model_id: str, device: str):
     """
-    Loads the specified Llama or other HF model as a text-generation pipeline.
-    This references a hypothetical Llama 3.3.
     """
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(
@@ -211,127 +253,57 @@ def load_llama_pipeline(model_id: str, device: str):
         torch_dtype=torch.float16 if device == "auto" else torch.float32,
         device_map=device
     )
-    pipe = pipeline(
         "text-generation",
         model=model,
         tokenizer=tokenizer,
         device_map=device
     )
-    return pipe
 
-def refine_description_with_llama(user_prompt: str, pipeline_llama, lang: str):
     """
-    Create a polished script using Llama.
-    Incorporate a language preference or style instructions.
     """
-    system_msg = (
-        "You are an expert radio imaging script writer. "
-        "Refine the user's concept into a concise, compelling piece. "
-        "Ensure to reflect any language or style requests."
     )
-    combined = f"{system_msg}\nLanguage: {lang}\nUser Concept: {user_prompt}\nRefined Script:"
 
-    result = pipeline_llama(
-        combined,
-        max_new_tokens=300,
         do_sample=True,
-        temperature=0.8
-    )
-    text = result[0]["generated_text"]
 
-    # Attempt to isolate the final portion
-    if "Refined Script:" in text:
-        text = text.split("Refined Script:")[-1].strip()
 
-    text += "\n\n(Generated with Llama 3 - Modern Radio Generator)"
-    return text
-
-# ---------------------------------------------------------------------
-# 9) Buttons & Outputs
-# ---------------------------------------------------------------------
-col_gen1, col_gen2 = st.columns(2)
 
-with col_gen1:
-    if st.button("📄 Refine with Llama 3"):
-        if not prompt.strip():
-            st.error("Please provide a brief concept first.")
-        else:
-            with st.spinner("Refining your script..."):
-                try:
-                    pipeline_llama = load_llama_pipeline(llama_model_id, device_option)
-                    refined_text = refine_description_with_llama(prompt, pipeline_llama, language_choice)
-                    st.session_state['refined_prompt'] = refined_text
-                    st.success("Refined text generated!")
-                    st.write(refined_text)
-                    st.download_button(
-                        "💾 Download Script",
-                        refined_text,
-                        file_name="refined_jingle_script.txt"
-                    )
-                except Exception as e:
-                    st.error(f"Error: {e}")
-
-with col_gen2:
-    if st.button("▶ Generate Audio with MusicGen"):
-        if 'refined_prompt' not in st.session_state or not st.session_state['refined_prompt']:
-            st.error("No refined prompt found. Please generate/refine your script first.")
-        else:
-            final_text_for_music = st.session_state['refined_prompt']
-            final_text_for_music += f"\nPreferred style: {music_style}"
-            with st.spinner("Generating audio..."):
-                try:
-                    mg_model, mg_processor = None, None
-
-                    # Load MusicGen model once
-                    mg_model, mg_processor = load_musicgen_model()
-
-                    inputs = mg_processor(
-                        text=[final_text_for_music],
-                        padding=True,
-                        return_tensors="pt"
-                    )
-                    audio_output = mg_model.generate(**inputs, max_new_tokens=audio_tokens)
-                    sr = mg_model.config.audio_encoder.sampling_rate
-
-                    audio_filename = f"radio_imaging_{music_style.lower()}.wav"
-                    scipy.io.wavfile.write(
-                        audio_filename,
-                        rate=sr,
-                        data=audio_output[0, 0].numpy()
-                    )
-                    st.success("Audio generated! Listen below:")
-                    st.audio(audio_filename)
-
-                    # Optional Save/Upload prompt
-                    if st.checkbox("Upload this WAV to a cloud (demo)?"):
-                        with st.spinner("Uploading..."):
-                            # Placeholder for your own S3 or cloud logic
-                            st.success("Uploaded (placeholder).")
-                except Exception as e:
-                    st.error(f"Error generating audio: {e}")
-
-# ---------------------------------------------------------------------
-# 10) Load & Cache MusicGen
-# ---------------------------------------------------------------------
 @st.cache_resource
 def load_musicgen_model():
     """
-    Load and cache the MusicGen model & processor.
-    Using 'facebook/musicgen-small' as example.
     """
-    mgm = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
-    mgp = AutoProcessor.from_pretrained("facebook/musicgen-small")
-    return mgm, mgp
 
 # ---------------------------------------------------------------------
-# 11) Footer
 # ---------------------------------------------------------------------
 st.markdown("---")
 st.markdown(
     """
-    <div class='footer-note'>
-        © 2025 Modern Radio Generator - Built with Llama & MusicGen |
-        <a href='https://example.com' target='_blank'>YourCompany</a>
     </div>
     """,
     unsafe_allow_html=True
 
 import streamlit as st
+import requests
 import torch
 import scipy.io.wavfile
 from transformers import (
     AutoTokenizer,
     AutoModelForCausalLM,
     AutoProcessor,
     MusicgenForConditionalGeneration
 )
+from io import BytesIO
 from streamlit_lottie import st_lottie  # pip install streamlit-lottie
 
 # ---------------------------------------------------------------------
+# 1) PAGE CONFIG
 # ---------------------------------------------------------------------
 st.set_page_config(
+    page_title="Radio Imaging AI MVP",
     page_icon="🎧",
     layout="wide"
 )
 
 # ---------------------------------------------------------------------
+# 2) CUSTOM CSS / SPOTIFY-LIKE UI
 # ---------------------------------------------------------------------
+CUSTOM_CSS = """
 <style>
+/* Body styling for a dark, music-app vibe */
 body {
+    background-color: #121212;
+    color: #FFFFFF;
+    font-family: "Helvetica Neue", sans-serif;
 }
 
+/* Main container width */
 .block-container {
     max-width: 1100px;
+    padding: 1rem 1.5rem;
 }
 
+/* Headings with a neon-ish green accent */
+h1, h2, h3 {
+    color: #1DB954;
+    margin-bottom: 0.5rem;
 }
 
+/* Buttons: rounded, bright Spotify-like green on hover */
 .stButton>button {
+    background-color: #1DB954 !important;
     color: #FFFFFF !important;
+    border-radius: 24px;
+    border: none;
+    font-size: 16px !important;
     padding: 0.6rem 1.2rem !important;
+    transition: background-color 0.3s ease;
+}
+.stButton>button:hover {
+    background-color: #1ed760 !important;
 }
 
+/* Sidebar: black background, white text */
 .sidebar .sidebar-content {
+    background-color: #000000;
+    color: #FFFFFF;
 }
 
+/* Text inputs and text areas */
 textarea, input, select {
+    border-radius: 8px !important;
+    background-color: #282828 !important;
+    color: #FFFFFF !important;
+    border: 1px solid #3e3e3e;
 }
 
+/* Audio player styling */
+audio {
+    width: 100%;
+    margin-top: 1rem;
 }
 
+/* Lottie container styling */
 .lottie-container {
     display: flex;
     justify-content: center;
+    margin-bottom: 20px;
 }
 
+/* Footer styling */
 .footer-note {
     text-align: center;
     font-size: 14px;
+    opacity: 0.7;
     margin-top: 2rem;
 }
 
+/* Hide Streamlit's default branding if desired */
 #MainMenu, footer {visibility: hidden;}
 </style>
 """
+st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
 
 # ---------------------------------------------------------------------
+# 3) HELPER: LOAD LOTTIE ANIMATION
 # ---------------------------------------------------------------------
 @st.cache_data
 def load_lottie_url(url: str):
     """
+    Fetch Lottie JSON for animations.
     """
     r = requests.get(url)
     if r.status_code != 200:
         return None
     return r.json()
 
+# Example Lottie animation (radio waves / music eq, etc.)
+LOTTIE_URL = "https://assets3.lottiefiles.com/temp/lf20_Q6h5zV.json"
+lottie_animation = load_lottie_url(LOTTIE_URL)
+
+# ---------------------------------------------------------------------
+# 4) SIDEBAR: "LIBRARY" NAVIGATION (MIMICS SPOTIFY)
+# ---------------------------------------------------------------------
+with st.sidebar:
+    st.header("🎚 Radio Library")
+    st.write("**My Stations**")
+    st.write("- Favorites")
+    st.write("- Recently Generated")
+    st.write("- Top Hits")
+    st.write("---")
+    st.write("**Settings**")
+    st.markdown("<br>", unsafe_allow_html=True)
 
 # ---------------------------------------------------------------------
+# 5) HEADER SECTION WITH LOTS OF FLARE
 # ---------------------------------------------------------------------
+col1, col2 = st.columns([3, 2], gap="large")
+
+with col1:
+    st.title("AI Radio Imaging MVP")
+    st.subheader("Llama-Driven Promo Scripts, MusicGen Audio")
 
     st.markdown(
         """
+        Create **radio imaging promos** and **jingles** with a minimal but creative MVP.
+        This app:
+        - Uses a (hypothetical) [Llama 3] model for **script generation**.
+        - Uses Meta's [MusicGen](https://github.com/facebookresearch/audiocraft) for **audio**.
+        - Features a Spotify-like UI & Lottie animations for a modern user experience.
+        """
     )
+with col2:
+    if lottie_animation:
         with st.container():
+            st_lottie(lottie_animation, height=180, loop=True, key="radio_lottie")
     else:
+        st.write("*No animation loaded.*")
 
 st.markdown("---")
 
 # ---------------------------------------------------------------------
+# 6) PROMPT INPUT & MODEL SELECTION
 # ---------------------------------------------------------------------
+st.subheader("🎙 Step 1: Briefly Describe Your Promo Idea")
 
+prompt = st.text_area(
+    "E.g. 'A 15-second upbeat jingle with a catchy hook for a Top 40 morning show'",
+    height=120
+)
 
+col_model, col_device = st.columns(2)
+with col_model:
     llama_model_id = st.text_input(
+        "Llama Model (Hugging Face ID)",
+        value="meta-llama/Llama-3.3-70B-Instruct",  # Replace with a real model
+        help="If non-existent, you'll see errors. Try Llama 2 (e.g. meta-llama/Llama-2-7b-chat-hf)."
     )
+with col_device:
     device_option = st.selectbox(
+        "Choose Device",
         ["auto", "cpu"],
+        help="For GPU usage, pick 'auto'. CPU can be slow for big models."
     )
 
 # ---------------------------------------------------------------------
+# 7) BUTTON: GENERATE RADIO SCRIPT WITH LLAMA
 # ---------------------------------------------------------------------
+if st.button("📝 Generate Promo Script"):
+    if not prompt.strip():
+        st.error("Please enter a radio imaging concept first.")
+    else:
+        with st.spinner("Generating script..."):
+            try:
+                # Load Llama pipeline
+                pipeline_llama = load_llama_pipeline(llama_model_id, device_option)
+                # Generate refined script
+                refined_text = generate_radio_script(prompt, pipeline_llama)
+                st.session_state["refined_script"] = refined_text
+                st.success("Promo script generated!")
+                st.write(refined_text)
+            except Exception as e:
+                st.error(f"Error during Llama generation: {e}")
+
+st.markdown("---")
 
 # ---------------------------------------------------------------------
+# 8) AUDIO GENERATION: MUSICGEN
+# ---------------------------------------------------------------------
+st.subheader("🎶 Step 2: Generate Your Radio Audio")
+
+audio_tokens = st.slider("MusicGen Max Tokens (Track Length)", 128, 1024, 512, 64)
+
+if st.button("🎧 Create Audio with MusicGen"):
+    # Check if we have a refined script
+    if "refined_script" not in st.session_state:
+        st.error("Please generate a promo script first.")
+    else:
+        with st.spinner("Generating audio..."):
+            try:
+                # Load MusicGen
+                mg_model, mg_processor = load_musicgen_model()
+                descriptive_text = st.session_state["refined_script"]
+
+                # Prepare model input
+                inputs = mg_processor(
+                    text=[descriptive_text],
+                    return_tensors="pt",
+                    padding=True
+                )
+                # Generate audio
+                audio_values = mg_model.generate(**inputs, max_new_tokens=audio_tokens)
+                sr = mg_model.config.audio_encoder.sampling_rate
+
+                # Save audio to WAV
+                out_filename = "radio_imaging_output.wav"
+                scipy.io.wavfile.write(out_filename, rate=sr, data=audio_values[0,0].numpy())
+
+                st.success("Audio created! Press play to listen:")
+                st.audio(out_filename)
+            except Exception as e:
+                st.error(f"Error generating audio: {e}")
+
+# ---------------------------------------------------------------------
+# 9) HELPER FUNCTIONS
 # ---------------------------------------------------------------------
 @st.cache_resource
 def load_llama_pipeline(model_id: str, device: str):
     """
+    Load the Llama model & pipeline.
     """
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(
         torch_dtype=torch.float16 if device == "auto" else torch.float32,
         device_map=device
     )
+    text_gen_pipeline = pipeline(
         "text-generation",
         model=model,
         tokenizer=tokenizer,
         device_map=device
     )
+    return text_gen_pipeline
 
+def generate_radio_script(user_input: str, pipeline_llama) -> str:
     """
+    Use Llama to refine the user's input into a brief but creative radio imaging script.
     """
+    system_prompt = (
+        "You are a top-tier radio imaging producer. "
+        "Take the user's concept and craft a short, high-impact promo script. "
+        "Include style, tone, and potential CTA if relevant."
     )
+    full_prompt = f"{system_prompt}\nUser concept: {user_input}\nRefined script:"
 
+    output = pipeline_llama(
+        full_prompt,
+        max_new_tokens=200,
         do_sample=True,
+        temperature=0.9
+    )[0]["generated_text"]
 
+    # Attempt to isolate the final script portion
+    if "Refined script:" in output:
+        output = output.split("Refined script:", 1)[-1].strip()
+    output += "\n\n(Generated by Llama in Radio Imaging MVP)"
 
+    return output
 
 @st.cache_resource
 def load_musicgen_model():
     """
+    Load MusicGen (small version).
     """
+    mg_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+    mg_processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+    return mg_model, mg_processor
 
 # ---------------------------------------------------------------------
+# 10) FOOTER
 # ---------------------------------------------------------------------
 st.markdown("---")
 st.markdown(
     """
+    <div class="footer-note">
+        &copy; 2025 Radio Imaging MVP &ndash; Built with Llama & MusicGen. <br>
+        Inspired by Spotify's UI for a sleek, modern experience.
     </div>
     """,
     unsafe_allow_html=True