import streamlit as st
from transformers import AutoProcessor, MusicgenForConditionalGeneration
import scipy.io.wavfile
import openai
import torch


# Streamlit app setup
st.set_page_config(
    page_icon="https://soundboard.bilsimaging.com/faviconbilsimaging.png",
    layout="wide",
    page_title="Radio Imaging Audio Generator Beta 0.1",
    initial_sidebar_state="expanded",
)

# App Header
st.markdown("""
    <h1>Radio Imaging Audio Generator
    <span style='font-size: 24px; color: #FDC74A;'>Beta 0.1</span></h1>
    """, unsafe_allow_html=True)
st.write("Welcome to the Radio Imaging & MusicGen AI audio generator. Easily create unique audio for your radio imaging projects or for music creation using cutting-edge AI technology.")
st.markdown("---")

# Instructions Section
with st.expander("📘 How to Use This Web App"):
    st.markdown("""
        1. **Enter OpenAI API Key**: Provide your API key in the sidebar to access the GPT model.
        2. **Select GPT Model**: Choose the desired model, such as `gpt-3.5-turbo-16k`.
        3. **Write a Description**: Provide a detailed description of your desired audio.
        4. **Generate and Review the Prompt**: Generate a description and review the output.
        5. **Generate Audio**: Use the description to create your audio file.
        6. **Playback and Download**: Listen to or download the generated audio.
    """)

# Sidebar Inputs
with st.sidebar:
    openai_api_key = st.text_input("🔑 OpenAI API Key", type="password", help="Enter your OpenAI API key.")
    st.caption("Need an API key? Get one [here](https://platform.openai.com/account/api-keys).")
    model = st.selectbox("🛠 Choose GPT Model", options=("gpt-3.5-turbo", "gpt-3.5-turbo-16k"))

# Prompt Input
st.markdown("## ✍🏻 Write Your Description")
prompt = st.text_area(
    "Describe the audio you'd like to generate.", 
    help="Include details like mood, instruments, style, or purpose (e.g., calm background music for a morning show)."
)

# Generate Prompt
if st.button("📄 Generate Prompt"):
    if not openai_api_key.strip() or not prompt.strip():
        st.error("Please provide both an OpenAI API key and a description.")
    else:
        with st.spinner("Generating your prompt... Please wait."):
            try:
                # Ask the selected GPT model to expand the user's idea into a
                # detailed audio description (uses the openai>=1.0 client API)
                client = openai.OpenAI(api_key=openai_api_key)
                full_prompt = {"role": "user", "content": f"Describe a radio imaging audio piece based on: {prompt}"}
                response = client.chat.completions.create(model=model, messages=[full_prompt])
                descriptive_text = response.choices[0].message.content.strip()

                # Append a credit line
                descriptive_text += "\n\nΒ© Created using Radio Imaging Audio Generator by Bilsimaging"

                # Save to session state
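                # (st.session_state keeps the prompt available after Streamlit
                # reruns the script when the "Generate Audio" button below is used)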
                st.session_state['generated_prompt'] = descriptive_text
                st.success("Prompt successfully generated!")
                st.write(descriptive_text)
                st.download_button("📥 Download Prompt", descriptive_text, file_name="generated_prompt.txt")
            except Exception as e:
                st.error(f"Error while generating prompt: {e}")

st.markdown("---")

# Cache Model Loading
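# st.cache_resource keeps one model/processor instance in memory across Streamlit
# reruns, so the musicgen-small weights are loaded (and downloaded on first use)
# only once per session.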
@st.cache_resource
def load_model():
    """Load and cache the MusicGen model and processor."""
    model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    return model, processor

# Generate Audio
if st.button("▶ Generate Audio"):
    if 'generated_prompt' not in st.session_state or not st.session_state['generated_prompt']:
        st.error("Please generate a prompt before creating audio.")
    else:
        descriptive_text = st.session_state['generated_prompt']
        with st.spinner("Generating your audio... This might take a few moments."):
            try:
                # Load model and processor
                musicgen_model, processor = load_model()

                # Generate audio from the prompt
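                # (max_new_tokens=512 corresponds to roughly 10 seconds of audio,
                # assuming MusicGen's ~50 audio tokens per second)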
                inputs = processor(text=[descriptive_text], padding=True, return_tensors="pt")
                audio_values = musicgen_model.generate(**inputs, max_new_tokens=512)
                sampling_rate = musicgen_model.config.audio_encoder.sampling_rate

                # Save and display the audio
                audio_filename = "Bilsimaging_radio_imaging_output.wav"
                scipy.io.wavfile.write(audio_filename, rate=sampling_rate, data=audio_values[0, 0].numpy())
                st.success("Audio successfully generated!")
                st.audio(audio_filename)
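                # Optional sketch: also offer the WAV for download, mirroring the
                # prompt download above (reads the file just written by scipy)
                with open(audio_filename, "rb") as wav_file:
                    st.download_button("📥 Download Audio", wav_file, file_name=audio_filename, mime="audio/wav")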
            except Exception as e:
                st.error(f"Error while generating audio: {e}")

# Footer Section
st.markdown("---")
st.markdown("""
    ✔️ Made with ❤️ by [Bilsimaging](https://bilsimaging.com). Your feedback and support help us grow!
    """)
st.markdown("<style>#MainMenu {visibility: hidden;} footer {visibility: hidden;}</style>", unsafe_allow_html=True)