Zasha1 committed on
Commit
2ab4df4
·
verified ·
1 Parent(s): 5fc78bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -65
app.py CHANGED
@@ -1,5 +1,4 @@
1
- import sounddevice as sd
2
- import numpy as np
3
  from sentiment_analysis import analyze_sentiment
4
  from product_recommender import ProductRecommender
5
  from objection_handler import ObjectionHandler
@@ -20,58 +19,63 @@ model = SentenceTransformer('all-MiniLM-L6-v2')
20
  def real_time_analysis():
21
  st.info("Listening... Say 'stop' to end the process.")
22
 
23
- samplerate = 16000 # Sample rate for audio capture
24
- duration = 5 # Duration of each audio chunk in seconds
25
  sentiment_scores = []
26
  transcribed_chunks = []
27
  total_text = ""
28
 
29
  try:
30
- while True:
31
- # Capture audio
32
- audio_data = sd.rec(int(samplerate * duration), samplerate=samplerate, channels=1, dtype='float32')
33
- sd.wait() # Wait for the recording to finish
34
-
35
- # Convert audio data to bytes for processing
36
- audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
37
-
38
- # Analyze the audio
39
- text = analyze_audio(audio_bytes, samplerate)
40
- if not text:
41
- continue
42
-
43
- st.write(f"*Recognized Text:* {text}")
44
-
45
- if 'stop' in text.lower():
46
- st.write("Stopping real-time analysis...")
47
- break
48
-
49
- # Append to the total conversation
50
- total_text += text + " "
51
- sentiment, score = analyze_sentiment(text)
52
- sentiment_scores.append(score)
53
-
54
- # Handle objection
55
- objection_response = handle_objection(text)
56
-
57
- # Get product recommendation
58
- recommendations = []
59
- if is_valid_input(text) and is_relevant_sentiment(score):
60
- query_embedding = model.encode([text])
61
- distances, indices = product_recommender.index.search(query_embedding, 1)
62
 
63
- if distances[0][0] < 1.5: # Similarity threshold
64
- recommendations = product_recommender.get_recommendations(text)
65
-
66
- transcribed_chunks.append((text, sentiment, score))
67
-
68
- st.write(f"*Sentiment:* {sentiment} (Score: {score})")
69
- st.write(f"*Objection Response:* {objection_response}")
70
-
71
- if recommendations:
72
- st.write("*Product Recommendations:*")
73
- for rec in recommendations:
74
- st.write(rec)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  # After conversation ends, calculate and display overall sentiment and summary
77
  overall_sentiment = calculate_overall_sentiment(sentiment_scores)
@@ -94,23 +98,6 @@ def real_time_analysis():
94
  except Exception as e:
95
  st.error(f"Error in real-time analysis: {e}")
96
 
97
- def analyze_audio(audio_bytes, samplerate):
98
- """Analyze audio data and return transcribed text."""
99
- try:
100
- # Use a speech-to-text model or API to transcribe the audio
101
- # For simplicity, we'll use a placeholder function
102
- text = transcribe_audio(audio_bytes, samplerate)
103
- return text
104
- except Exception as e:
105
- st.error(f"Error analyzing audio: {e}")
106
- return None
107
-
108
- def transcribe_audio(audio_bytes, samplerate):
109
- """Placeholder function for transcribing audio."""
110
- # Replace this with your actual speech-to-text implementation
111
- # For now, we'll just return a dummy text
112
- return "This is a placeholder transcription."
113
-
114
  def generate_comprehensive_summary(chunks):
115
  """Generate a comprehensive summary from conversation chunks."""
116
  full_text = " ".join([chunk[0] for chunk in chunks])
 
1
+ import speech_recognition as sr
 
2
  from sentiment_analysis import analyze_sentiment
3
  from product_recommender import ProductRecommender
4
  from objection_handler import ObjectionHandler
 
19
  def real_time_analysis():
20
  st.info("Listening... Say 'stop' to end the process.")
21
 
22
+ recognizer = sr.Recognizer()
 
23
  sentiment_scores = []
24
  transcribed_chunks = []
25
  total_text = ""
26
 
27
  try:
28
+ # Use the virtual microphone device
29
+ mic = sr.Microphone(device_index=0) # Use the virtual microphone device index
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
+ while True:
32
+ with mic as source:
33
+ st.write("Listening...")
34
+ recognizer.adjust_for_ambient_noise(source)
35
+ audio = recognizer.listen(source)
36
+
37
+ try:
38
+ st.write("Recognizing...")
39
+ text = recognizer.recognize_google(audio)
40
+ st.write(f"*Recognized Text:* {text}")
41
+
42
+ if 'stop' in text.lower():
43
+ st.write("Stopping real-time analysis...")
44
+ break
45
+
46
+ # Append to the total conversation
47
+ total_text += text + " "
48
+ sentiment, score = analyze_sentiment(text)
49
+ sentiment_scores.append(score)
50
+
51
+ # Handle objection
52
+ objection_response = handle_objection(text)
53
+
54
+ # Get product recommendation
55
+ recommendations = []
56
+ if is_valid_input(text) and is_relevant_sentiment(score):
57
+ query_embedding = model.encode([text])
58
+ distances, indices = product_recommender.index.search(query_embedding, 1)
59
+
60
+ if distances[0][0] < 1.5: # Similarity threshold
61
+ recommendations = product_recommender.get_recommendations(text)
62
+
63
+ transcribed_chunks.append((text, sentiment, score))
64
+
65
+ st.write(f"*Sentiment:* {sentiment} (Score: {score})")
66
+ st.write(f"*Objection Response:* {objection_response}")
67
+
68
+ if recommendations:
69
+ st.write("*Product Recommendations:*")
70
+ for rec in recommendations:
71
+ st.write(rec)
72
+
73
+ except sr.UnknownValueError:
74
+ st.error("Speech Recognition could not understand the audio.")
75
+ except sr.RequestError as e:
76
+ st.error(f"Error with the Speech Recognition service: {e}")
77
+ except Exception as e:
78
+ st.error(f"Error during processing: {e}")
79
 
80
  # After conversation ends, calculate and display overall sentiment and summary
81
  overall_sentiment = calculate_overall_sentiment(sentiment_scores)
 
98
  except Exception as e:
99
  st.error(f"Error in real-time analysis: {e}")
100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
  def generate_comprehensive_summary(chunks):
102
  """Generate a comprehensive summary from conversation chunks."""
103
  full_text = " ".join([chunk[0] for chunk in chunks])