# metrics.py
import torch
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline

# Load Sentence-BERT model for semantic similarity
sentence_bert_model = SentenceTransformer('all-MiniLM-L6-v2')

# Load a pre-trained emotion classifier
emotion_classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion", top_k=None)

def compute_semantic_similarity(original_comment, paraphrased_comment):
    """
    Compute the semantic similarity between the original and paraphrased comments using Sentence-BERT.
    Returns a score between 0 and 1 (higher is better).
    """
    original_embedding = sentence_bert_model.encode(original_comment, convert_to_tensor=True)
    paraphrased_embedding = sentence_bert_model.encode(paraphrased_comment, convert_to_tensor=True)
    similarity_score = util.cos_sim(original_embedding, paraphrased_embedding)[0][0].item()
    return round(similarity_score, 2)

def compute_emotion_shift(original_comment, paraphrased_comment):
    """
    Compute the shift in emotional tone between the original and paraphrased comments.
    Returns the dominant emotion labels for both comments and a flag indicating if the shift is positive.
    """
    # Classify emotions in the original comment
    original_emotions = emotion_classifier(original_comment)
    # Since pipeline returns a list of lists, take the first (and only) inner list
    original_emotions = original_emotions[0] if isinstance(original_emotions, list) and original_emotions else []
    original_dominant_emotion = max(original_emotions, key=lambda x: x['score'])['label'] if original_emotions else "unknown"

    # Classify emotions in the paraphrased comment
    paraphrased_emotions = emotion_classifier(paraphrased_comment)
    paraphrased_emotions = paraphrased_emotions[0] if isinstance(paraphrased_emotions, list) and paraphrased_emotions else []
    paraphrased_dominant_emotion = max(paraphrased_emotions, key=lambda x: x['score'])['label'] if paraphrased_emotions else "unknown"

    # Define negative and positive emotions
    negative_emotions = ['anger', 'sadness', 'fear']
    positive_emotions = ['joy', 'love']

    # A shift is positive when the original comment carries a negative emotion
    # and the paraphrase no longer does (i.e., it is neutral or positive).
    is_positive_shift = (
        original_dominant_emotion in negative_emotions and
        paraphrased_dominant_emotion not in negative_emotions and
        paraphrased_dominant_emotion != "unknown"
    )

    return original_dominant_emotion, paraphrased_dominant_emotion, is_positive_shift

def compute_empathy_score(paraphrased_comment):
    """
    Compute a proxy empathy score based on politeness keywords.
    Returns a score between 0 and 1 (higher indicates more empathy).
    """
    empathy_keywords = ['please', 'thank you', 'appreciate', 'understand', 'sorry', 'consider', 'kindly', 'help', 'support']
    comment_lower = paraphrased_comment.lower()
    # Count keyword hits (simple substring match); three or more hits are
    # treated as maximally empathetic, capping the score at 1.0.
    keyword_count = sum(1 for keyword in empathy_keywords if keyword in comment_lower)
    empathy_score = min(keyword_count / 3, 1.0)
    return round(empathy_score, 2)
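

# Example usage (illustrative sketch, not part of the module's public API):
# running the file directly prints all three metrics for a sample comment pair.
# The sample strings below are invented for demonstration only.
if __name__ == "__main__":
    original = "This code is a mess. Rewrite it before you submit anything else."
    paraphrased = "Thanks for the effort! Could you please clean this up before the next submission? Happy to help."

    similarity = compute_semantic_similarity(original, paraphrased)
    orig_emotion, para_emotion, positive_shift = compute_emotion_shift(original, paraphrased)
    empathy = compute_empathy_score(paraphrased)

    print(f"Semantic similarity: {similarity}")
    print(f"Emotion shift: {orig_emotion} -> {para_emotion} (positive shift: {positive_shift})")
    print(f"Empathy score: {empathy}")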