Zasha1 committed on
Commit
c95c282
·
verified ·
1 Parent(s): e99ed09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -63
app.py CHANGED
@@ -1,5 +1,4 @@
1
- from streamlit_webrtc import webrtc_streamer, WebRtcMode
2
- from sentiment_analysis import analyze_sentiment
3
  from product_recommender import ProductRecommender
4
  from objection_handler import ObjectionHandler
5
  from google_sheets import fetch_call_data, store_data_in_sheet
@@ -10,14 +9,10 @@ import uuid
10
  import pandas as pd
11
  import plotly.express as px
12
  import streamlit as st
13
- import numpy as np
14
- from io import BytesIO
15
- import wave
16
 
17
  # Initialize components
18
- # Initialize components
19
- product_recommender = ProductRecommender("recommendations.csv") # Relative path
20
- objection_handler = ObjectionHandler("objections.csv") # Relative path
21
  model = SentenceTransformer('all-MiniLM-L6-v2')
22
 
23
  def generate_comprehensive_summary(chunks):
@@ -121,60 +116,29 @@ def handle_objection(text):
121
  def real_time_analysis():
122
  st.info("Listening... Say 'stop' to end the process.")
123
 
124
- def audio_frame_callback(audio_frame):
125
- # Convert audio frame to bytes
126
- audio_bytes = audio_frame.to_ndarray().tobytes()
127
-
128
- # Save audio bytes to a temporary WAV file
129
- with BytesIO() as wav_buffer:
130
- with wave.open(wav_buffer, 'wb') as wav_file:
131
- wav_file.setnchannels(1) # Mono audio
132
- wav_file.setsampwidth(2) # 2 bytes for int16
133
- wav_file.setframerate(16000) # Sample rate
134
- wav_file.writeframes(audio_bytes)
135
-
136
- # Transcribe the audio
137
- text = transcribe_audio(wav_buffer.getvalue())
138
- if text:
139
- st.write(f"*Recognized Text:* {text}")
140
-
141
- # Analyze sentiment
142
- sentiment, score = analyze_sentiment(text)
143
- st.write(f"*Sentiment:* {sentiment} (Score: {score})")
144
-
145
- # Handle objection
146
- objection_response = handle_objection(text)
147
- st.write(f"*Objection Response:* {objection_response}")
148
-
149
- # Get product recommendation
150
- recommendations = []
151
- if is_valid_input(text) and is_relevant_sentiment(score):
152
- query_embedding = model.encode([text])
153
- distances, indices = product_recommender.index.search(query_embedding, 1)
154
-
155
- if distances[0][0] < 1.5: # Similarity threshold
156
- recommendations = product_recommender.get_recommendations(text)
157
-
158
- if recommendations:
159
- st.write("*Product Recommendations:*")
160
- for rec in recommendations:
161
- st.write(rec)
162
-
163
- return audio_frame
164
-
165
- # Start WebRTC audio stream
166
- webrtc_ctx = webrtc_streamer(
167
- key="real-time-audio",
168
- mode=WebRtcMode.SENDONLY,
169
- audio_frame_callback=audio_frame_callback,
170
- media_stream_constraints={"audio": True, "video": False},
171
- )
172
-
173
- def transcribe_audio(audio_bytes):
174
- """Transcribe audio using a speech-to-text model or API."""
175
- # Replace this with your actual speech-to-text implementation
176
- # For now, we'll just return a dummy text
177
- return "This is a placeholder transcription."
178
 
179
  def run_app():
180
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
@@ -185,7 +149,8 @@ def run_app():
185
 
186
  if app_mode == "Real-Time Call Analysis":
187
  st.header("Real-Time Sales Call Analysis")
188
- real_time_analysis()
 
189
 
190
  elif app_mode == "Dashboard":
191
  st.header("Call Summaries and Sentiment Analysis")
 
1
+ from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
 
2
  from product_recommender import ProductRecommender
3
  from objection_handler import ObjectionHandler
4
  from google_sheets import fetch_call_data, store_data_in_sheet
 
9
  import pandas as pd
10
  import plotly.express as px
11
  import streamlit as st
 
 
 
12
 
13
# Initialize shared components once at module load.
# Both CSV paths are relative to the app's working directory.
objection_handler = ObjectionHandler("objections.csv")  # Use relative path
product_recommender = ProductRecommender("recommendations.csv")  # Use relative path
# Sentence embedding model used for product-recommendation similarity search.
model = SentenceTransformer('all-MiniLM-L6-v2')
17
 
18
  def generate_comprehensive_summary(chunks):
 
116
def real_time_analysis():
    """Run one live listening session and display its results in Streamlit.

    Transcribes speech into chunks, then shows a conversation summary and
    overall sentiment, and persists everything to Google Sheets.
    Errors are surfaced to the UI rather than raised.
    """
    st.info("Listening... Say 'stop' to end the process.")

    try:
        # Delegate capture + transcription to sentiment_analysis.py.
        # NOTE(review): an empty objections_dict is passed, so objection
        # matching is effectively disabled here — confirm this is intended.
        chunks = transcribe_with_chunks({})
        if chunks:
            # Each chunk is indexed [2] for its sentiment label;
            # aggregate those into a single overall sentiment.
            sentiments = [chunk[2] for chunk in chunks]
            overall_sentiment = calculate_overall_sentiment(sentiments)
            call_summary = generate_comprehensive_summary(chunks)

            st.subheader("Conversation Summary:")
            st.write(call_summary)
            st.subheader("Overall Sentiment:")
            st.write(overall_sentiment)

            # Persist the conversation to Google Sheets.
            # NOTE(review): `config` is not defined or imported in the
            # visible code — this will raise NameError at runtime unless
            # it is provided elsewhere in the module. TODO confirm.
            store_data_in_sheet(
                config["google_sheet_id"],
                chunks,
                call_summary,
                overall_sentiment,
            )
            st.success("Conversation data stored successfully in Google Sheets!")
    except Exception as e:
        # UI boundary: report the failure to the user instead of crashing.
        st.error(f"Error in real-time analysis: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
  def run_app():
144
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
 
149
 
150
  if app_mode == "Real-Time Call Analysis":
151
  st.header("Real-Time Sales Call Analysis")
152
+ if st.button("Start Listening"):
153
+ real_time_analysis()
154
 
155
  elif app_mode == "Dashboard":
156
  st.header("Call Summaries and Sentiment Analysis")