# SalesAI/sentiment_analysis.py
import os
import json
import time
import numpy as np
import sounddevice as sd
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
from huggingface_hub import login
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler, load_objections
from env_setup import config
from sentence_transformers import SentenceTransformer
from scipy.io.wavfile import write
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Hugging Face API setup
huggingface_api_key = config["huggingface_api_key"]
login(token=huggingface_api_key)
# Sentiment Analysis Model
model_name = "tabularisai/multilingual-sentiment-analysis"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
sentiment_analyzer = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
# Text preprocessing and sentiment analysis helpers
def preprocess_text(text):
"""Preprocess text for better sentiment analysis."""
return text.strip().lower()
def analyze_sentiment(text):
"""Analyze sentiment of the text using Hugging Face model."""
try:
if not text.strip():
return "NEUTRAL", 0.0
processed_text = preprocess_text(text)
result = sentiment_analyzer(processed_text)[0]
print(f"Sentiment Analysis Result: {result}")
# Map raw labels to sentiments
sentiment_map = {
'Very Negative': "NEGATIVE",
'Negative': "NEGATIVE",
'Neutral': "NEUTRAL",
'Positive': "POSITIVE",
'Very Positive': "POSITIVE"
}
sentiment = sentiment_map.get(result['label'], "NEUTRAL")
return sentiment, result['score']
except Exception as e:
print(f"Error in sentiment analysis: {e}")
return "NEUTRAL", 0.5
def record_audio(duration=5, sample_rate=44100):
"""Record audio for a specified duration."""
print("Recording audio...")
audio = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1, dtype='float32')
sd.wait() # Wait for the recording to finish
print("Recording completed.")
return np.squeeze(audio)
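# Example usage (filename is illustrative): save a 5 s mono clip for inspection.
#   clip = record_audio(duration=5)
#   write("sample.wav", 44100, clip)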
def transcribe_audio(audio, sample_rate=44100):
"""Transcribe recorded audio using a speech-to-text API."""
try:
# Save audio to a temporary file for transcription
audio_file = "temp_audio.wav"
write(audio_file, sample_rate, audio)
        # An external transcription service (e.g., Whisper, AssemblyAI, or Google)
        # would be called on audio_file here; the call is stubbed out.
        transcription = "Example transcription text from audio."  # Placeholder
return transcription
except Exception as e:
print(f"Error in audio transcription: {e}")
return ""
def transcribe_with_chunks(objections_dict):
    """Run the chunked recording/transcription loop with sentiment, objection,
    and recommendation analysis. `objections_dict` is accepted for interface
    compatibility but unused here; objections are loaded via ObjectionHandler."""
print("Say 'start listening' to begin transcription. Say 'stop listening' to stop.")
is_listening = False
chunks = []
current_chunk = []
chunk_start_time = time.time()
# Initialize handlers with semantic search capabilities
objection_handler = ObjectionHandler("path_to_objections.csv")
product_recommender = ProductRecommender("path_to_recommendations.csv")
# Load the embeddings model once
model = SentenceTransformer('all-MiniLM-L6-v2')
try:
while True:
if not is_listening:
command = input("Enter 'start' to begin listening or 'stop' to quit: ").lower()
if command == "start":
is_listening = True
print("Listening started. Speak into the microphone.")
continue
elif command == "stop":
break
# Record and process audio in chunks
audio_data = record_audio(duration=5)
text = transcribe_audio(audio_data)
if text.strip():
print(f"Transcription: {text}")
current_chunk.append(text)
            if time.time() - chunk_start_time > 3:  # always true after a 5 s recording, so each chunk is flushed
if current_chunk:
chunk_text = " ".join(current_chunk)
# Process sentiment
sentiment, score = analyze_sentiment(chunk_text)
chunks.append((chunk_text, sentiment, score))
                    # Handle objections and recommendations (note: this embedding
                    # is computed but not consumed by the calls below)
                    query_embedding = model.encode([chunk_text])
responses = objection_handler.handle_objection(chunk_text)
recommendations = product_recommender.get_recommendations(chunk_text)
# Print results
if responses:
print("\nSuggested Response:")
for response in responses:
print(f"β†’ {response}")
if recommendations:
print("\nRecommendations for this response:")
for idx, rec in enumerate(recommendations, 1):
print(f"{idx}. {rec}")
print("\n")
current_chunk = []
chunk_start_time = time.time()
    except KeyboardInterrupt:
        print("\nExiting...")
    return chunks
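# A minimal sketch of the semantic matching the handlers are assumed to perform
# internally; `semantic_match` is illustrative and is not called by the loop above.
def semantic_match(model, query_text, candidate_texts):
    """Return the candidate text most similar to the query by cosine similarity."""
    from sentence_transformers import util
    query_emb = model.encode([query_text])
    candidate_embs = model.encode(candidate_texts)
    scores = util.cos_sim(query_emb, candidate_embs)[0]  # shape: (len(candidate_texts),)
    return candidate_texts[int(scores.argmax())]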
if __name__ == "__main__":
objections_file_path = "path_to_objections.csv"
objections_dict = load_objections(objections_file_path)
transcribed_chunks = transcribe_with_chunks(objections_dict)
print("Final transcriptions and sentiments:", transcribed_chunks)