sal-maq committed on
Commit
e4c39da
·
verified ·
1 Parent(s): 4565227

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +125 -0
app.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import numpy as np
4
+ import gradio as gr
5
+ import whisper
6
+ from gtts import gTTS
7
+ from groq import Groq
8
+ import soundfile as sf
9
+
10
# --- Groq client setup ---
# SECURITY: the API key must come from the environment. Never hard-code
# secrets in source control — a previously committed key must be revoked.
_groq_api_key = os.environ.get('GROQ_API_KEY')
if not _groq_api_key:
    raise RuntimeError("GROQ_API_KEY environment variable is not set")
groq_client = Groq(api_key=_groq_api_key)

# Load the Whisper speech-to-text model once at startup so each request
# only pays the transcription cost, not the model-load cost.
whisper_model = whisper.load_model("base")
16
+
17
def process_audio(audio_file_path):
    """Transcribe an uploaded audio file, answer it with an LLM, and speak the reply.

    Pipeline: Whisper (speech -> text) -> Groq llama3-8b-8192 (text -> text)
    -> gTTS (text -> MP3 speech).

    Args:
        audio_file_path: Filesystem path to the user's audio file (Gradio
            ``type="filepath"``). May be None/empty if nothing was uploaded.

    Returns:
        tuple[str, str | None]: ``(response_text, response_audio_path)`` on
        success, or ``("Error: ...", None)`` on any failure — errors are
        returned rather than raised so the Gradio UI can display them.
    """
    try:
        # Guard against an empty submission from the UI.
        if not audio_file_path:
            raise ValueError("No audio file provided")

        print(f"Received audio file path: {audio_file_path}")

        # Whisper accepts a file path directly — no need to copy the upload
        # into a second temporary file first (the original code leaked one
        # delete=False temp file per request here).
        result = whisper_model.transcribe(audio_file_path)
        user_text = result['text']
        print(f"Transcribed text: {user_text}")

        # Generate a response with the Llama3-8B model via the Groq API.
        chat_completion = groq_client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": user_text,
                }
            ],
            model="llama3-8b-8192",
        )
        response_text = chat_completion.choices[0].message.content
        print(f"Response text: {response_text}")

        # Synthesize the reply to an MP3. The file must outlive this function
        # (Gradio streams it to the client), so delete=False is intentional;
        # we only use the context manager to obtain a unique closed path.
        tts = gTTS(text=response_text, lang='en')
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as temp_audio_file:
            response_audio_path = temp_audio_file.name
        tts.save(response_audio_path)

        return response_text, response_audio_path
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error: {str(e)}", None
67
+
68
# --- Gradio UI ---
# Blocks layout: custom CSS injected via Markdown, a title/description pair,
# then a two-column row (input on the left, output on the right).
with gr.Blocks() as demo:
    # Inline stylesheet for the app's look and feel.
    gr.Markdown(
        """
        <style>
        .gradio-container {
        font-family: Arial, sans-serif;
        background-color: #e0f7fa; /* Changed background color */
        border-radius: 10px;
        padding: 20px;
        box-shadow: 0 4px 12px rgba(0,0,0,0.2);
        }
        .gradio-input, .gradio-output {
        border-radius: 6px;
        border: 1px solid #ddd;
        padding: 10px;
        }
        .gradio-button {
        background-color: #28a745;
        color: white;
        border-radius: 6px;
        border: none;
        padding: 8px 16px; /* Adjusted padding */
        font-size: 16px; /* Adjusted font size */
        }
        .gradio-button:hover {
        background-color: #218838;
        }
        .gradio-title {
        font-size: 24px;
        font-weight: bold;
        margin-bottom: 20px;
        }
        .gradio-description {
        font-size: 14px;
        margin-bottom: 20px;
        color: #555;
        }
        </style>
        """
    )

    # Header and usage blurb.
    gr.Markdown("# Voice-to-Voice Chatbot\nDeveloped by Salman Maqbool")
    gr.Markdown("Upload an audio file to interact with the voice-to-voice chatbot. The chatbot will transcribe the audio, generate a response, and provide a spoken reply.")

    with gr.Row():
        # Left column: audio upload plus the trigger button.
        with gr.Column():
            user_audio = gr.Audio(type="filepath", label="Upload Audio File")
            send_btn = gr.Button("Submit")

        # Right column: the model's text reply and its spoken rendering.
        with gr.Column():
            reply_textbox = gr.Textbox(label="Response Text", placeholder="Generated response will appear here")
            reply_audio = gr.Audio(label="Response Audio", type="filepath")

    # Wire the button to the processing pipeline.
    send_btn.click(process_audio, inputs=user_audio, outputs=[reply_textbox, reply_audio])

# Start the web server.
demo.launch()