# SalesAI / app.py
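"""Streamlit app for real-time sales call analysis: live transcription with
sentiment analysis, objection handling, product recommendations, and a
Google Sheets-backed dashboard."""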
from streamlit_webrtc import webrtc_streamer, WebRtcMode
from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler
from google_sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from env_setup import config
import re
import pandas as pd
import plotly.express as px
import streamlit as st
import numpy as np
from io import BytesIO
import wave
import threading
import queue
# Initialize components
objection_handler = ObjectionHandler("objections.csv") # Use relative path
product_recommender = ProductRecommender("recommendations.csv") # Use relative path
model = SentenceTransformer('all-MiniLM-L6-v2')
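# NOTE: the FAISS lookups below (objections and recommendations) compare raw
# distances from this model's embeddings against a fixed 1.5 cutoff. This
# assumes both indexes were built with the same 'all-MiniLM-L6-v2' embeddings
# and an L2 metric; re-calibrate the threshold if either assumption changes.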
# Queue to hold transcribed text
transcription_queue = queue.Queue()
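# The queue bridges the WebRTC audio callback threads (producers) and the
# Streamlit script thread (consumer), which drains it on each rerun.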
def generate_comprehensive_summary(chunks):
    """
    Generate a comprehensive summary from conversation chunks.

    Each chunk is expected to be a (text, sentiment_label) tuple, e.g.
    ("How much is the dress?", "NEUTRAL").
    """
    # Extract full text from chunks
    full_text = " ".join([chunk[0] for chunk in chunks])

    # Collect per-chunk sentiment labels for the tallies below
    sentiments = [chunk[1] for chunk in chunks]
# Determine overall conversation context
context_keywords = {
'product_inquiry': ['dress', 'product', 'price', 'stock'],
'pricing': ['cost', 'price', 'budget'],
'negotiation': ['installment', 'payment', 'manage']
}
# Detect conversation themes
themes = []
for keyword_type, keywords in context_keywords.items():
if any(keyword.lower() in full_text.lower() for keyword in keywords):
themes.append(keyword_type)
# Basic sentiment analysis
positive_count = sentiments.count('POSITIVE')
negative_count = sentiments.count('NEGATIVE')
neutral_count = sentiments.count('NEUTRAL')
# Key interaction highlights
key_interactions = []
for chunk in chunks:
if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
key_interactions.append(chunk[0])
# Construct summary
summary = f"Conversation Summary:\n"
# Context and themes
if 'product_inquiry' in themes:
summary += "• Customer initiated a product inquiry about items.\n"
if 'pricing' in themes:
summary += "• Price and budget considerations were discussed.\n"
if 'negotiation' in themes:
summary += "• Customer and seller explored flexible payment options.\n"
# Sentiment insights
summary += f"\nConversation Sentiment:\n"
summary += f"• Positive Interactions: {positive_count}\n"
summary += f"• Negative Interactions: {negative_count}\n"
summary += f"• Neutral Interactions: {neutral_count}\n"
# Key highlights
summary += "\nKey Conversation Points:\n"
for interaction in key_interactions[:3]: # Limit to top 3 key points
summary += f"• {interaction}\n"
# Conversation outcome
if positive_count > negative_count:
summary += "\nOutcome: Constructive and potentially successful interaction."
elif negative_count > positive_count:
summary += "\nOutcome: Interaction may require further follow-up."
else:
summary += "\nOutcome: Neutral interaction with potential for future engagement."
return summary
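
# Illustrative usage:
#   generate_comprehensive_summary([
#       ("How much is the dress?", "NEUTRAL"),
#       ("That price works for me", "POSITIVE"),
#   ])
# flags the 'product_inquiry' and 'pricing' themes and reports one positive
# and one neutral interaction.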
def is_valid_input(text):
    """Accept only inputs of 3+ characters consisting of letters and whitespace."""
    text = text.strip().lower()
    if len(text) < 3 or re.match(r'^[a-zA-Z\s]*$', text) is None:
        return False
    return True
def is_relevant_sentiment(sentiment_score):
    """Treat scores above 0.4 as confident enough to act on (tunable)."""
    return sentiment_score > 0.4
def calculate_overall_sentiment(sentiment_scores):
    """Average signed sentiment scores into a single label.

    Assumes scores are signed (negative values for negative sentiment); if
    analyze_sentiment returns only non-negative confidences, map labels to
    signs before calling this.
    """
    if sentiment_scores:
        average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
        overall_sentiment = (
            "POSITIVE" if average_sentiment > 0 else
            "NEGATIVE" if average_sentiment < 0 else
            "NEUTRAL"
        )
    else:
        overall_sentiment = "NEUTRAL"
    return overall_sentiment
def handle_objection(text):
    """Look up the closest known objection and return its canned response(s)."""
    query_embedding = model.encode([text])
    distances, indices = objection_handler.index.search(query_embedding, 1)
    if distances[0][0] < 1.5:  # L2 distance threshold; adjust as needed
        responses = objection_handler.handle_objection(text)
        return "\n".join(responses) if responses else "No objection response found."
    return "No objection response found."
def transcribe_audio(audio_bytes, sample_rate=16000):
    """Transcribe audio using the transcribe_with_chunks function from sentiment_analysis.py."""
    try:
        # Package the raw PCM bytes as an in-memory mono 16-bit WAV.
        # NOTE: this buffer is currently never handed to the transcriber;
        # transcribe_with_chunks() does its own capture, so the WAV below is
        # unused until that function accepts an audio source.
        with BytesIO() as wav_buffer:
            with wave.open(wav_buffer, 'wb') as wf:
                wf.setnchannels(1)            # mono audio
                wf.setsampwidth(2)            # 2 bytes per sample (int16)
                wf.setframerate(sample_rate)  # sample rate in Hz
                wf.writeframes(audio_bytes)

        # Use the transcribe_with_chunks function from sentiment_analysis.py
        chunks = transcribe_with_chunks({})  # pass an empty objections_dict for now
        if chunks:
            return chunks[-1][0]  # return the latest transcribed text
    except Exception as e:
        print(f"Error transcribing audio: {e}")
    return None
def audio_processing_thread(audio_frame):
    """Thread function to process a single audio frame."""
    audio_data = audio_frame.to_ndarray()
    print(f"Audio data shape: {audio_data.shape}")  # debug: check audio data shape
    print(f"Audio data sample: {audio_data[:10]}")  # debug: check first 10 samples

    # Frames may arrive as float in [-1, 1] or already as int16 depending on
    # the WebRTC sample format; rescale only when needed, since multiplying
    # int16 samples by 32767 would overflow.
    if audio_data.dtype != np.int16:
        audio_data = (audio_data * 32767).astype(np.int16)
    audio_bytes = audio_data.tobytes()

    # Transcribe the audio
    text = transcribe_audio(audio_bytes)
    if text:
        transcription_queue.put(text)  # hand the text off to the Streamlit thread
def real_time_analysis():
    st.info("Listening... Use the widget's stop control to end the stream.")

    def audio_frame_callback(audio_frame):
        # Process each frame off the callback thread so the callback returns
        # quickly; daemon threads won't block interpreter shutdown.
        threading.Thread(
            target=audio_processing_thread, args=(audio_frame,), daemon=True
        ).start()
        return audio_frame
# Start WebRTC audio stream
webrtc_ctx = webrtc_streamer(
key="real-time-audio",
mode=WebRtcMode.SENDONLY,
audio_frame_callback=audio_frame_callback,
media_stream_constraints={"audio": True, "video": False},
)
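
    # NOTE: Streamlit reruns this script on every interaction, so the loop
    # below only drains whatever has accumulated in the queue at rerun time;
    # it is not a continuously updating display.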
# Display transcribed text from the queue
while not transcription_queue.empty():
text = transcription_queue.get()
st.write(f"*Recognized Text:* {text}")
# Analyze sentiment
sentiment, score = analyze_sentiment(text)
st.write(f"*Sentiment:* {sentiment} (Score: {score})")
# Handle objection
objection_response = handle_objection(text)
st.write(f"*Objection Response:* {objection_response}")
# Get product recommendation
recommendations = []
if is_valid_input(text) and is_relevant_sentiment(score):
query_embedding = model.encode([text])
distances, indices = product_recommender.index.search(query_embedding, 1)
if distances[0][0] < 1.5: # Similarity threshold
recommendations = product_recommender.get_recommendations(text)
if recommendations:
st.write("*Product Recommendations:*")
for rec in recommendations:
st.write(rec)
def run_app():
st.set_page_config(page_title="Sales Call Assistant", layout="wide")
st.title("AI Sales Call Assistant")
st.sidebar.title("Navigation")
app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
if app_mode == "Real-Time Call Analysis":
st.header("Real-Time Sales Call Analysis")
real_time_analysis()
elif app_mode == "Dashboard":
st.header("Call Summaries and Sentiment Analysis")
try:
data = fetch_call_data(config["google_sheet_id"])
if data.empty:
st.warning("No data available in the Google Sheet.")
else:
# Sentiment Visualizations
sentiment_counts = data['Sentiment'].value_counts()
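                # One row per sentiment label; both charts below share this tally.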
# Pie Chart
col1, col2 = st.columns(2)
with col1:
st.subheader("Sentiment Distribution")
fig_pie = px.pie(
values=sentiment_counts.values,
names=sentiment_counts.index,
title='Call Sentiment Breakdown',
color_discrete_map={
'POSITIVE': 'green',
'NEGATIVE': 'red',
'NEUTRAL': 'blue'
}
)
st.plotly_chart(fig_pie)
# Bar Chart
with col2:
st.subheader("Sentiment Counts")
fig_bar = px.bar(
x=sentiment_counts.index,
y=sentiment_counts.values,
title='Number of Calls by Sentiment',
labels={'x': 'Sentiment', 'y': 'Number of Calls'},
color=sentiment_counts.index,
color_discrete_map={
'POSITIVE': 'green',
'NEGATIVE': 'red',
'NEUTRAL': 'blue'
}
)
st.plotly_chart(fig_bar)
# Existing Call Details Section
st.subheader("All Calls")
display_data = data.copy()
                display_data['Summary Preview'] = display_data['Summary'].fillna('').str[:100] + '...'
st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
# Dropdown to select Call ID
unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
# Display selected Call ID details
call_details = data[data['Call ID'] == call_id]
if not call_details.empty:
st.subheader("Detailed Call Information")
st.write(f"**Call ID:** {call_id}")
st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
# Expand summary section
st.subheader("Full Call Summary")
st.text_area("Summary:",
value=call_details.iloc[0]['Summary'],
height=200,
disabled=True)
# Show all chunks for the selected call
st.subheader("Conversation Chunks")
for _, row in call_details.iterrows():
if pd.notna(row['Chunk']):
st.write(f"**Chunk:** {row['Chunk']}")
st.write(f"**Sentiment:** {row['Sentiment']}")
st.write("---") # Separator between chunks
else:
st.error("No details available for the selected Call ID.")
except Exception as e:
st.error(f"Error loading dashboard: {e}")
if __name__ == "__main__":
run_app()