Zasha1 committed on
Commit
03fbaa1
·
verified ·
1 Parent(s): b02432b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -159
app.py CHANGED
@@ -1,4 +1,5 @@
1
- import speech_recognition as sr
 
2
  from sentiment_analysis import analyze_sentiment
3
  from product_recommender import ProductRecommender
4
  from objection_handler import ObjectionHandler
@@ -7,127 +8,75 @@ from sentence_transformers import SentenceTransformer
7
  from env_setup import config
8
  import re
9
  import uuid
10
- from google.oauth2 import service_account
11
- from googleapiclient.discovery import build
12
  import pandas as pd
13
  import plotly.express as px
14
- import plotly.graph_objs as go
15
  import streamlit as st
16
 
17
-
18
  # Initialize components
19
  objection_handler = ObjectionHandler('objections.csv')
20
  product_recommender = ProductRecommender('recommendations.csv')
21
  model = SentenceTransformer('all-MiniLM-L6-v2')
22
-
23
- def numpy_to_audio_data(audio_data):
24
- """
25
- Convert NumPy array to AudioData for speech_recognition
26
- """
27
- # Convert float32 to int16
28
- int_audio = (audio_data * 32767).astype(np.int16)
29
-
30
- # Create AudioData object
31
- recognizer = sr.Recognizer()
32
- audio_data = sr.AudioData(
33
- int_audio.tobytes(),
34
- sample_rate=16000,
35
- sample_width=int_audio.dtype.itemsize
36
- )
37
- return audio_data
38
 
39
  def real_time_analysis():
40
- st.info("Note: If microphone access fails, please use text input.")
41
 
42
- try:
43
- # Try to list available microphones
44
- available_mics = sr.Microphone.list_microphone_names()
45
- st.write(f"Available microphones: {available_mics}")
46
- except Exception as e:
47
- st.warning(f"Could not detect microphones: {e}")
48
 
49
  try:
50
- # Try multiple device indices
51
- mic = None
52
- for device_index in range(10): # Try first 10 device indices
53
- try:
54
- mic = sr.Microphone(device_index=device_index)
55
- st.write(f"Using microphone at device index {device_index}")
56
- break
57
- except Exception:
 
 
 
58
  continue
59
 
60
- if mic is None:
61
- # Fallback to text input if no microphone works
62
- st.warning("No microphone available. Switching to text input.")
63
- text_input = st.text_input("Enter conversation text:")
64
- if text_input:
65
- sentiment, score = analyze_sentiment(text_input)
66
- st.write(f"*Recognized Text:* {text_input}")
67
- st.write(f"*Sentiment:* {sentiment} (Score: {score})")
68
- return
69
 
70
- recognizer = sr.Recognizer()
71
- sentiment_scores = []
72
- transcribed_chunks = []
73
- total_text = ""
74
 
75
- st.info("Say 'stop' to end the process.")
 
 
 
76
 
77
- while True:
78
- with mic as source:
79
- st.write("Listening...")
80
- recognizer.adjust_for_ambient_noise(source)
81
- audio = recognizer.listen(source)
82
-
83
- try:
84
- st.write("Recognizing...")
85
- text = recognizer.recognize_google(audio)
86
- st.write(f"*Recognized Text:* {text}")
87
-
88
- if 'stop' in text.lower():
89
- st.write("Stopping real-time analysis...")
90
- break
91
-
92
-
93
- # Append to the total conversation
94
- total_text += text + " "
95
- sentiment, score = analyze_sentiment(text)
96
- sentiment_scores.append(score)
97
-
98
- # Handle objection
99
- objection_response = handle_objection(text)
100
-
101
- # Get product recommendation
102
- recommendations = []
103
- if is_valid_input(text) and is_relevant_sentiment(score):
104
- query_embedding = model.encode([text])
105
- distances, indices = product_recommender.index.search(query_embedding, 1)
106
-
107
- if distances[0][0] < 1.5: # Similarity threshold
108
- recommendations = product_recommender.get_recommendations(text)
109
-
110
- transcribed_chunks.append((text, sentiment, score))
111
-
112
- st.write(f"*Sentiment:* {sentiment} (Score: {score})")
113
- st.write(f"*Objection Response:* {objection_response}")
114
-
115
- if recommendations:
116
- st.write("*Product Recommendations:*")
117
- for rec in recommendations:
118
- st.write(rec)
119
-
120
- except sr.UnknownValueError:
121
- st.error("Speech Recognition could not understand the audio.")
122
- except sr.RequestError as e:
123
- st.error(f"Error with the Speech Recognition service: {e}")
124
- except Exception as e:
125
- st.error(f"Error during processing: {e}")
126
 
127
  # After conversation ends, calculate and display overall sentiment and summary
128
  overall_sentiment = calculate_overall_sentiment(sentiment_scores)
129
  call_summary = generate_comprehensive_summary(transcribed_chunks)
130
-
131
  st.subheader("Conversation Summary:")
132
  st.write(total_text.strip())
133
  st.subheader("Overall Sentiment:")
@@ -135,86 +84,84 @@ def real_time_analysis():
135
 
136
  # Store data in Google Sheets
137
  store_data_in_sheet(
138
- config["google_sheet_id"],
139
- transcribed_chunks,
140
- call_summary,
141
  overall_sentiment
142
  )
143
  st.success("Conversation data stored successfully in Google Sheets!")
144
 
145
  except Exception as e:
146
  st.error(f"Error in real-time analysis: {e}")
147
- st.warning("Unable to access microphone. Please use text input.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
  def generate_comprehensive_summary(chunks):
150
- """
151
- Generate a comprehensive summary from conversation chunks
152
- """
153
- # Extract full text from chunks
154
  full_text = " ".join([chunk[0] for chunk in chunks])
155
-
156
- # Perform basic analysis
157
  total_chunks = len(chunks)
158
  sentiments = [chunk[1] for chunk in chunks]
159
-
160
- # Determine overall conversation context
161
  context_keywords = {
162
  'product_inquiry': ['dress', 'product', 'price', 'stock'],
163
  'pricing': ['cost', 'price', 'budget'],
164
  'negotiation': ['installment', 'payment', 'manage']
165
  }
166
-
167
- # Detect conversation themes
168
  themes = []
169
  for keyword_type, keywords in context_keywords.items():
170
  if any(keyword.lower() in full_text.lower() for keyword in keywords):
171
  themes.append(keyword_type)
172
-
173
- # Basic sentiment analysis
174
  positive_count = sentiments.count('POSITIVE')
175
  negative_count = sentiments.count('NEGATIVE')
176
  neutral_count = sentiments.count('NEUTRAL')
177
-
178
- # Key interaction highlights
179
  key_interactions = []
180
  for chunk in chunks:
181
  if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
182
  key_interactions.append(chunk[0])
183
-
184
- # Construct summary
185
  summary = f"Conversation Summary:\n"
186
-
187
- # Context and themes
188
  if 'product_inquiry' in themes:
189
  summary += "• Customer initiated a product inquiry about items.\n"
190
-
191
  if 'pricing' in themes:
192
  summary += "• Price and budget considerations were discussed.\n"
193
-
194
  if 'negotiation' in themes:
195
  summary += "• Customer and seller explored flexible payment options.\n"
196
-
197
- # Sentiment insights
198
  summary += f"\nConversation Sentiment:\n"
199
  summary += f"• Positive Interactions: {positive_count}\n"
200
  summary += f"• Negative Interactions: {negative_count}\n"
201
  summary += f"• Neutral Interactions: {neutral_count}\n"
202
-
203
- # Key highlights
204
  summary += "\nKey Conversation Points:\n"
205
- for interaction in key_interactions[:3]: # Limit to top 3 key points
206
  summary += f"• {interaction}\n"
207
-
208
- # Conversation outcome
209
  if positive_count > negative_count:
210
  summary += "\nOutcome: Constructive and potentially successful interaction."
211
  elif negative_count > positive_count:
212
  summary += "\nOutcome: Interaction may require further follow-up."
213
  else:
214
  summary += "\nOutcome: Neutral interaction with potential for future engagement."
215
-
216
- return summary
217
 
 
218
 
219
  def is_valid_input(text):
220
  text = text.strip().lower()
@@ -245,7 +192,6 @@ def handle_objection(text):
245
  return "\n".join(responses) if responses else "No objection response found."
246
  return "No objection response found."
247
 
248
-
249
  def run_app():
250
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
251
  st.title("AI Sales Call Assistant")
@@ -265,77 +211,69 @@ def run_app():
265
  if data.empty:
266
  st.warning("No data available in the Google Sheet.")
267
  else:
268
- # Sentiment Visualizations
269
  sentiment_counts = data['Sentiment'].value_counts()
270
-
271
- # Pie Chart
272
  col1, col2 = st.columns(2)
273
  with col1:
274
  st.subheader("Sentiment Distribution")
275
  fig_pie = px.pie(
276
- values=sentiment_counts.values,
277
- names=sentiment_counts.index,
278
  title='Call Sentiment Breakdown',
279
  color_discrete_map={
280
- 'POSITIVE': 'green',
281
- 'NEGATIVE': 'red',
282
  'NEUTRAL': 'blue'
283
  }
284
  )
285
  st.plotly_chart(fig_pie)
286
 
287
- # Bar Chart
288
  with col2:
289
  st.subheader("Sentiment Counts")
290
  fig_bar = px.bar(
291
- x=sentiment_counts.index,
292
- y=sentiment_counts.values,
293
  title='Number of Calls by Sentiment',
294
  labels={'x': 'Sentiment', 'y': 'Number of Calls'},
295
  color=sentiment_counts.index,
296
  color_discrete_map={
297
- 'POSITIVE': 'green',
298
- 'NEGATIVE': 'red',
299
  'NEUTRAL': 'blue'
300
  }
301
  )
302
  st.plotly_chart(fig_bar)
303
 
304
- # Existing Call Details Section
305
  st.subheader("All Calls")
306
  display_data = data.copy()
307
  display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
308
  st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
309
 
310
- # Dropdown to select Call ID
311
  unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
312
  call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
313
 
314
- # Display selected Call ID details
315
  call_details = data[data['Call ID'] == call_id]
316
  if not call_details.empty:
317
  st.subheader("Detailed Call Information")
318
  st.write(f"**Call ID:** {call_id}")
319
  st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
320
-
321
- # Expand summary section
322
  st.subheader("Full Call Summary")
323
- st.text_area("Summary:",
324
- value=call_details.iloc[0]['Summary'],
325
- height=200,
326
  disabled=True)
327
-
328
- # Show all chunks for the selected call
329
  st.subheader("Conversation Chunks")
330
  for _, row in call_details.iterrows():
331
- if pd.notna(row['Chunk']):
332
  st.write(f"**Chunk:** {row['Chunk']}")
333
  st.write(f"**Sentiment:** {row['Sentiment']}")
334
- st.write("---") # Separator between chunks
335
  else:
336
  st.error("No details available for the selected Call ID.")
337
  except Exception as e:
338
  st.error(f"Error loading dashboard: {e}")
339
 
340
  if __name__ == "__main__":
341
- run_app()
 
1
+ import sounddevice as sd
2
+ import numpy as np
3
  from sentiment_analysis import analyze_sentiment
4
  from product_recommender import ProductRecommender
5
  from objection_handler import ObjectionHandler
 
8
  from env_setup import config
9
  import re
10
  import uuid
 
 
11
  import pandas as pd
12
  import plotly.express as px
 
13
  import streamlit as st
14
 
 
15
  # Initialize components
16
  objection_handler = ObjectionHandler('objections.csv')
17
  product_recommender = ProductRecommender('recommendations.csv')
18
  model = SentenceTransformer('all-MiniLM-L6-v2')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  def real_time_analysis():
21
+ st.info("Listening... Say 'stop' to end the process.")
22
 
23
+ samplerate = 16000 # Sample rate for audio capture
24
+ duration = 5 # Duration of each audio chunk in seconds
25
+ sentiment_scores = []
26
+ transcribed_chunks = []
27
+ total_text = ""
 
28
 
29
  try:
30
+ while True:
31
+ # Capture audio
32
+ audio_data = sd.rec(int(samplerate * duration), samplerate=samplerate, channels=1, dtype='float32')
33
+ sd.wait() # Wait for the recording to finish
34
+
35
+ # Convert audio data to bytes for processing
36
+ audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
37
+
38
+ # Analyze the audio
39
+ text = analyze_audio(audio_bytes, samplerate)
40
+ if not text:
41
  continue
42
 
43
+ st.write(f"*Recognized Text:* {text}")
 
 
 
 
 
 
 
 
44
 
45
+ if 'stop' in text.lower():
46
+ st.write("Stopping real-time analysis...")
47
+ break
 
48
 
49
+ # Append to the total conversation
50
+ total_text += text + " "
51
+ sentiment, score = analyze_sentiment(text)
52
+ sentiment_scores.append(score)
53
 
54
+ # Handle objection
55
+ objection_response = handle_objection(text)
56
+
57
+ # Get product recommendation
58
+ recommendations = []
59
+ if is_valid_input(text) and is_relevant_sentiment(score):
60
+ query_embedding = model.encode([text])
61
+ distances, indices = product_recommender.index.search(query_embedding, 1)
62
+
63
+ if distances[0][0] < 1.5: # Similarity threshold
64
+ recommendations = product_recommender.get_recommendations(text)
65
+
66
+ transcribed_chunks.append((text, sentiment, score))
67
+
68
+ st.write(f"*Sentiment:* {sentiment} (Score: {score})")
69
+ st.write(f"*Objection Response:* {objection_response}")
70
+
71
+ if recommendations:
72
+ st.write("*Product Recommendations:*")
73
+ for rec in recommendations:
74
+ st.write(rec)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  # After conversation ends, calculate and display overall sentiment and summary
77
  overall_sentiment = calculate_overall_sentiment(sentiment_scores)
78
  call_summary = generate_comprehensive_summary(transcribed_chunks)
79
+
80
  st.subheader("Conversation Summary:")
81
  st.write(total_text.strip())
82
  st.subheader("Overall Sentiment:")
 
84
 
85
  # Store data in Google Sheets
86
  store_data_in_sheet(
87
+ config["google_sheet_id"],
88
+ transcribed_chunks,
89
+ call_summary,
90
  overall_sentiment
91
  )
92
  st.success("Conversation data stored successfully in Google Sheets!")
93
 
94
  except Exception as e:
95
  st.error(f"Error in real-time analysis: {e}")
96
+
97
def analyze_audio(audio_bytes, samplerate):
    """Transcribe a chunk of raw audio bytes into text.

    Delegates to `transcribe_audio` (currently a placeholder backend) and
    shields the caller from transcription failures: on any error the
    problem is surfaced in the UI via `st.error` and None is returned.

    Args:
        audio_bytes: Raw PCM audio as bytes (int16-encoded).
        samplerate: Sample rate of the audio in Hz.

    Returns:
        The transcribed text, or None when transcription fails.
    """
    try:
        # Hand the raw bytes to the speech-to-text backend.
        return transcribe_audio(audio_bytes, samplerate)
    except Exception as e:
        # Best-effort: report in the Streamlit UI and signal "no text".
        st.error(f"Error analyzing audio: {e}")
        return None
107
+
108
def transcribe_audio(audio_bytes, samplerate):
    """Placeholder speech-to-text stub.

    NOTE(review): this deliberately ignores its inputs and returns a fixed
    dummy string — swap in a real transcription backend here.

    Args:
        audio_bytes: Raw PCM audio as bytes (unused).
        samplerate: Sample rate of the audio in Hz (unused).

    Returns:
        A constant dummy transcription string.
    """
    return "This is a placeholder transcription."
113
 
114
def generate_comprehensive_summary(chunks):
    """Generate a comprehensive summary from conversation chunks.

    Args:
        chunks: Sequence of (text, sentiment_label, score) tuples, where
            sentiment_label is one of 'POSITIVE', 'NEGATIVE', 'NEUTRAL'.

    Returns:
        A multi-line summary string covering detected themes, sentiment
        counts, up to three key conversation points, and an overall
        outcome line. An empty `chunks` yields a minimal neutral summary.
    """
    full_text = " ".join(chunk[0] for chunk in chunks)
    # Hoist the lowercasing out of the theme loop (was recomputed per keyword).
    full_text_lower = full_text.lower()
    sentiments = [chunk[1] for chunk in chunks]

    # Keyword groups used to detect high-level conversation themes.
    context_keywords = {
        'product_inquiry': ['dress', 'product', 'price', 'stock'],
        'pricing': ['cost', 'price', 'budget'],
        'negotiation': ['installment', 'payment', 'manage']
    }

    themes = [
        keyword_type
        for keyword_type, keywords in context_keywords.items()
        if any(keyword in full_text_lower for keyword in keywords)
    ]

    positive_count = sentiments.count('POSITIVE')
    negative_count = sentiments.count('NEGATIVE')
    neutral_count = sentiments.count('NEUTRAL')

    # Chunks that touch on commercially relevant keywords become highlights.
    highlight_keywords = ['price', 'dress', 'stock', 'installment']
    key_interactions = [
        chunk[0] for chunk in chunks
        if any(keyword in chunk[0].lower() for keyword in highlight_keywords)
    ]

    summary = "Conversation Summary:\n"

    # Theme bullets, in fixed order.
    if 'product_inquiry' in themes:
        summary += "• Customer initiated a product inquiry about items.\n"
    if 'pricing' in themes:
        summary += "• Price and budget considerations were discussed.\n"
    if 'negotiation' in themes:
        summary += "• Customer and seller explored flexible payment options.\n"

    summary += "\nConversation Sentiment:\n"
    summary += f"• Positive Interactions: {positive_count}\n"
    summary += f"• Negative Interactions: {negative_count}\n"
    summary += f"• Neutral Interactions: {neutral_count}\n"

    summary += "\nKey Conversation Points:\n"
    for interaction in key_interactions[:3]:  # limit to the top 3 key points
        summary += f"• {interaction}\n"

    # Overall outcome based on the positive/negative balance.
    if positive_count > negative_count:
        summary += "\nOutcome: Constructive and potentially successful interaction."
    elif negative_count > positive_count:
        summary += "\nOutcome: Interaction may require further follow-up."
    else:
        summary += "\nOutcome: Neutral interaction with potential for future engagement."

    return summary
165
 
166
  def is_valid_input(text):
167
  text = text.strip().lower()
 
192
  return "\n".join(responses) if responses else "No objection response found."
193
  return "No objection response found."
194
 
 
195
  def run_app():
196
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
197
  st.title("AI Sales Call Assistant")
 
211
  if data.empty:
212
  st.warning("No data available in the Google Sheet.")
213
  else:
 
214
  sentiment_counts = data['Sentiment'].value_counts()
215
+
 
216
  col1, col2 = st.columns(2)
217
  with col1:
218
  st.subheader("Sentiment Distribution")
219
  fig_pie = px.pie(
220
+ values=sentiment_counts.values,
221
+ names=sentiment_counts.index,
222
  title='Call Sentiment Breakdown',
223
  color_discrete_map={
224
+ 'POSITIVE': 'green',
225
+ 'NEGATIVE': 'red',
226
  'NEUTRAL': 'blue'
227
  }
228
  )
229
  st.plotly_chart(fig_pie)
230
 
 
231
  with col2:
232
  st.subheader("Sentiment Counts")
233
  fig_bar = px.bar(
234
+ x=sentiment_counts.index,
235
+ y=sentiment_counts.values,
236
  title='Number of Calls by Sentiment',
237
  labels={'x': 'Sentiment', 'y': 'Number of Calls'},
238
  color=sentiment_counts.index,
239
  color_discrete_map={
240
+ 'POSITIVE': 'green',
241
+ 'NEGATIVE': 'red',
242
  'NEUTRAL': 'blue'
243
  }
244
  )
245
  st.plotly_chart(fig_bar)
246
 
 
247
  st.subheader("All Calls")
248
  display_data = data.copy()
249
  display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
250
  st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
251
 
 
252
  unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
253
  call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
254
 
 
255
  call_details = data[data['Call ID'] == call_id]
256
  if not call_details.empty:
257
  st.subheader("Detailed Call Information")
258
  st.write(f"**Call ID:** {call_id}")
259
  st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
260
+
 
261
  st.subheader("Full Call Summary")
262
+ st.text_area("Summary:",
263
+ value=call_details.iloc[0]['Summary'],
264
+ height=200,
265
  disabled=True)
266
+
 
267
  st.subheader("Conversation Chunks")
268
  for _, row in call_details.iterrows():
269
+ if pd.notna(row['Chunk']):
270
  st.write(f"**Chunk:** {row['Chunk']}")
271
  st.write(f"**Sentiment:** {row['Sentiment']}")
272
+ st.write("---")
273
  else:
274
  st.error("No details available for the selected Call ID.")
275
  except Exception as e:
276
  st.error(f"Error loading dashboard: {e}")
277
 
278
  if __name__ == "__main__":
279
+ run_app()