shukdevdatta123 committed
Commit c32d1e4 · verified · 1 Parent(s): e2170c7

Create v2.txt

Files changed (1)
1. v2.txt +308 -0
v2.txt ADDED
import base64
import tempfile
import os
import requests
import gradio as gr
from openai import OpenAI

# Available voices for audio generation
VOICES = ["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]

def process_text_input(api_key, text_prompt, selected_voice):
    """Generate audio response from text input"""
    try:
        # Initialize OpenAI client with the provided API key
        client = OpenAI(api_key=api_key)

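        # Requesting both modalities makes the model return a text reply plus
        # a base64-encoded WAV payload in message.audio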
        completion = client.chat.completions.create(
            model="gpt-4o-audio-preview",
            modalities=["text", "audio"],
            audio={"voice": selected_voice, "format": "wav"},
            messages=[
                {
                    "role": "user",
                    "content": text_prompt
                }
            ]
        )

        # Save the audio to a temporary file that persists after closing
        wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(wav_bytes)
            temp_path = f.name

        # The reply text is carried in message.audio.transcript;
        # message.content is typically None when audio output is requested
        message = completion.choices[0].message
        text_response = message.audio.transcript if message.audio else message.content

        return text_response, temp_path
    except Exception as e:
        return f"Error: {str(e)}", None

def process_audio_input(api_key, audio_path, text_prompt, selected_voice):
    """Process audio input and generate a response"""
    try:
        if not audio_path:
            return "Please upload or record audio first.", None

        # Initialize OpenAI client with the provided API key
        client = OpenAI(api_key=api_key)

        # Read audio file and encode to base64
        with open(audio_path, "rb") as audio_file:
            audio_data = audio_file.read()
            encoded_audio = base64.b64encode(audio_data).decode('utf-8')

        # Create message content with both text and audio
        message_content = []

        if text_prompt:
            message_content.append({
                "type": "text",
                "text": text_prompt
            })

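        # Attach the recording as an input_audio content part; the "format"
        # field must match the actual encoding of the uploaded file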
        message_content.append({
            "type": "input_audio",
            "input_audio": {
                "data": encoded_audio,
                "format": "wav"
            }
        })

        # Call OpenAI API
        completion = client.chat.completions.create(
            model="gpt-4o-audio-preview",
            modalities=["text", "audio"],
            audio={"voice": selected_voice, "format": "wav"},
            messages=[
                {
                    "role": "user",
                    "content": message_content
                }
            ]
        )

        # Save the audio response to a temporary file that persists
        wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(wav_bytes)
            temp_path = f.name

        # As above, read the reply text from message.audio.transcript
        message = completion.choices[0].message
        text_response = message.audio.transcript if message.audio else message.content

        return text_response, temp_path
    except Exception as e:
        return f"Error: {str(e)}", None

def transcribe_audio(api_key, audio_path):
    """Transcribe an audio file using OpenAI's API"""
    try:
        if not audio_path:
            return "No audio file provided for transcription."

        client = OpenAI(api_key=api_key)

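        # gpt-4o-transcribe is a speech-to-text model; whisper-1 can be
        # substituted here if that model is unavailable on the account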
        with open(audio_path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(
                model="gpt-4o-transcribe",
                file=audio_file
            )

        return transcription.text
    except Exception as e:
        return f"Transcription error: {str(e)}"

def download_example_audio():
    """Download an example audio file for testing"""
    try:
        url = "https://cdn.openai.com/API/docs/audio/alloy.wav"
        response = requests.get(url, timeout=30)
        response.raise_for_status()

        # Save to a temporary file
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(response.content)
            temp_path = f.name

        return temp_path
    except Exception:
        return None

def use_example_audio():
    """Load example audio for the interface"""
    audio_path = download_example_audio()
    return audio_path

# Create Gradio Interface
with gr.Blocks(title="OpenAI Audio Chat App") as app:
    gr.Markdown("# OpenAI Audio Chat App")
    gr.Markdown("Interact with the GPT-4o audio model through text and audio inputs")

    # API Key input (used across all tabs)
    api_key = gr.Textbox(
        label="OpenAI API Key",
        placeholder="Enter your OpenAI API key here",
        type="password"
    )
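    # The key is masked in the UI and passed to each event handler as a
    # regular input rather than stored globally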

    with gr.Tab("Text to Audio"):
        with gr.Row():
            with gr.Column():
                text_input = gr.Textbox(
                    label="Text Prompt",
                    placeholder="Enter your question or prompt here...",
                    lines=3
                )
                text_voice = gr.Dropdown(
                    choices=VOICES,
                    value="alloy",
                    label="Voice"
                )
                text_submit = gr.Button("Generate Response")

            with gr.Column():
                text_output = gr.Textbox(label="AI Response (Text)", lines=5)
                audio_output = gr.Audio(label="AI Response (Audio)")
                transcribed_output = gr.Textbox(label="Transcription of Audio Response", lines=3)

        # Function to process text input and then transcribe the resulting audio
        def text_input_with_transcription(api_key, text_prompt, voice):
            text_response, audio_path = process_text_input(api_key, text_prompt, voice)

            # Get transcription of the generated audio
            if audio_path:
                transcription = transcribe_audio(api_key, audio_path)
            else:
                transcription = "No audio generated to transcribe."

            return text_response, audio_path, transcription

        text_submit.click(
            fn=text_input_with_transcription,
            inputs=[api_key, text_input, text_voice],
            outputs=[text_output, audio_output, transcribed_output]
        )

    with gr.Tab("Audio Input to Audio Response"):
        with gr.Row():
            with gr.Column():
                audio_input = gr.Audio(
                    label="Audio Input",
                    type="filepath",
                    sources=["microphone", "upload"]
                )
                example_btn = gr.Button("Use Example Audio")

                accompanying_text = gr.Textbox(
                    label="Accompanying Text (Optional)",
                    placeholder="Add any text context or question about the audio...",
                    lines=2
                )
                audio_voice = gr.Dropdown(
                    choices=VOICES,
                    value="alloy",
                    label="Response Voice"
                )
                audio_submit = gr.Button("Process Audio & Generate Response")

            with gr.Column():
                audio_text_output = gr.Textbox(label="AI Response (Text)", lines=5)
                audio_audio_output = gr.Audio(label="AI Response (Audio)")
                audio_transcribed_output = gr.Textbox(label="Transcription of Audio Response", lines=3)
                input_transcription = gr.Textbox(label="Transcription of Input Audio", lines=3)

        # Function to process audio input, generate a response, and provide transcriptions
        def audio_input_with_transcription(api_key, audio_path, text_prompt, voice):
            # First transcribe the input audio (local name kept distinct from
            # the input_transcription component above)
            input_text = "N/A"
            if audio_path:
                input_text = transcribe_audio(api_key, audio_path)

            # Process the audio input and get a response
            text_response, response_audio_path = process_audio_input(api_key, audio_path, text_prompt, voice)

            # Transcribe the response audio
            response_transcription = "No audio generated to transcribe."
            if response_audio_path:
                response_transcription = transcribe_audio(api_key, response_audio_path)

            return text_response, response_audio_path, response_transcription, input_text

        audio_submit.click(
            fn=audio_input_with_transcription,
            inputs=[api_key, audio_input, accompanying_text, audio_voice],
            outputs=[audio_text_output, audio_audio_output, audio_transcribed_output, input_transcription]
        )

        example_btn.click(
            fn=use_example_audio,
            inputs=[],
            outputs=[audio_input]
        )

    with gr.Tab("Voice Samples"):
        gr.Markdown("## Listen to samples of each voice")

        def generate_voice_sample(api_key, voice_type):
            try:
                if not api_key:
                    return "Please enter your OpenAI API key first.", None, "No transcription available."

                client = OpenAI(api_key=api_key)
                completion = client.chat.completions.create(
                    model="gpt-4o-audio-preview",
                    modalities=["text", "audio"],
                    audio={"voice": voice_type, "format": "wav"},
                    messages=[
                        {
                            "role": "user",
                            "content": f"This is a sample of the {voice_type} voice. It has its own unique tone and character."
                        }
                    ]
                )

                # Save the audio to a temporary file that persists
                wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
                with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                    f.write(wav_bytes)
                    temp_path = f.name

                # Get transcription
                transcription = transcribe_audio(api_key, temp_path)

                return f"Sample generated with voice: {voice_type}", temp_path, transcription
            except Exception as e:
                return f"Error: {str(e)}", None, "No transcription available."

        with gr.Row():
            sample_voice = gr.Dropdown(
                choices=VOICES,
                value="alloy",
                label="Select Voice Sample"
            )
            sample_btn = gr.Button("Generate Sample")

        with gr.Row():
            sample_text = gr.Textbox(label="Status")
            sample_audio = gr.Audio(label="Voice Sample")
            sample_transcription = gr.Textbox(label="Transcription", lines=3)

        sample_btn.click(
            fn=generate_voice_sample,
            inputs=[api_key, sample_voice],
            outputs=[sample_text, sample_audio, sample_transcription]
        )
297
+
298
+ gr.Markdown("""
299
+ ## Notes:
300
+ - You must provide your OpenAI API key in the field above
301
+ - The model used is `gpt-4o-audio-preview` for conversation and `gpt-4o-transcribe` for transcriptions
302
+ - Audio inputs should be in WAV format
303
+ - Available voices: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, and verse
304
+ - Each audio response is automatically transcribed for verification
305
+ """)
306
+
307
+ if __name__ == "__main__":
308
+ app.launch()
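
For reference, the core round trip in this file can be exercised without the Gradio UI. A minimal sketch, assuming OPENAI_API_KEY is set in the environment and the account has access to gpt-4o-audio-preview (the prompt string is illustrative):

    import base64
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    completion = client.chat.completions.create(
        model="gpt-4o-audio-preview",
        modalities=["text", "audio"],
        audio={"voice": "alloy", "format": "wav"},
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )

    message = completion.choices[0].message
    print(message.audio.transcript)  # text of the spoken reply
    with open("reply.wav", "wb") as f:
        f.write(base64.b64decode(message.audio.data))  # decoded audio payload

The app itself launches with python app.py after pip install gradio openai requests (assuming the file is saved as app.py); Gradio serves the interface on a local URL, by default http://127.0.0.1:7860.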