Create metrics.py
metrics.py (ADDED, +64 -0)
@@ -0,0 +1,64 @@
# metrics.py
import torch
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline

# Load Sentence-BERT model for semantic similarity
sentence_bert_model = SentenceTransformer('all-MiniLM-L6-v2')

# Load a pre-trained emotion classifier (swap in a domain-specific model if one is available);
# top_k=None makes the pipeline return scores for every emotion label, not just the top one
emotion_classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion", top_k=None)
def compute_semantic_similarity(original_comment, paraphrased_comment):
    """
    Compute the semantic similarity between the original and paraphrased comments using Sentence-BERT.
    Returns a cosine similarity score, typically between 0 and 1 for natural-language pairs (higher is better).
    """
    # Encode the comments into embeddings
    original_embedding = sentence_bert_model.encode(original_comment, convert_to_tensor=True)
    paraphrased_embedding = sentence_bert_model.encode(paraphrased_comment, convert_to_tensor=True)

    # Compute cosine similarity between the two embeddings
    similarity_score = util.cos_sim(original_embedding, paraphrased_embedding)[0][0].item()
    return round(similarity_score, 2)
def compute_emotion_shift(original_comment, paraphrased_comment):
    """
    Compute the shift in emotional tone between the original and paraphrased comments.
    Returns the dominant emotion labels for both comments and a flag indicating whether the shift is positive.
    """
    # Classify emotions in the original comment.
    # Depending on the transformers version, the pipeline may wrap the per-label scores
    # for a single input in an extra list, so unwrap it if needed.
    original_emotions = emotion_classifier(original_comment)
    if original_emotions and isinstance(original_emotions[0], list):
        original_emotions = original_emotions[0]
    original_dominant_emotion = max(original_emotions, key=lambda x: x['score'])['label']

    # Classify emotions in the paraphrased comment
    paraphrased_emotions = emotion_classifier(paraphrased_comment)
    if paraphrased_emotions and isinstance(paraphrased_emotions[0], list):
        paraphrased_emotions = paraphrased_emotions[0]
    paraphrased_dominant_emotion = max(paraphrased_emotions, key=lambda x: x['score'])['label']

    # Define negative and positive emotions
    negative_emotions = ['anger', 'sadness', 'fear']
    positive_emotions = ['joy', 'love']

    # Check if the shift is positive (e.g., from a negative emotion to a neutral/positive one)
    is_positive_shift = (
        original_dominant_emotion in negative_emotions and
        (paraphrased_dominant_emotion in positive_emotions or paraphrased_dominant_emotion not in negative_emotions)
    )

    return original_dominant_emotion, paraphrased_dominant_emotion, is_positive_shift
def compute_empathy_score(paraphrased_comment):
    """
    Compute a proxy empathy score based on politeness keywords.
    Returns a score between 0 and 1 (higher indicates more empathy).
    """
    # Define a list of politeness/empathy-related keywords
    empathy_keywords = ['please', 'thank you', 'appreciate', 'understand', 'sorry', 'consider', 'kindly', 'help', 'support']

    # Count the number of empathy keywords in the paraphrased comment
    comment_lower = paraphrased_comment.lower()
    keyword_count = sum(1 for keyword in empathy_keywords if keyword in comment_lower)

    # Normalize the score (arbitrary scaling: three or more keywords give a score of 1)
    empathy_score = min(keyword_count / 3, 1.0)
    return round(empathy_score, 2)
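
A quick way to exercise the three metrics together is a small driver script. The sketch below is illustrative only and not part of this commit: the demo.py name and the sample comments are hypothetical, and it assumes metrics.py is importable from the working directory and that the models above download successfully.

# demo.py (illustrative usage sketch; filename and sample comments are hypothetical)
from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score

original = "This code is a mess, fix it now."
paraphrased = "Thanks for the effort; could you please clean this code up when you get a chance?"

similarity = compute_semantic_similarity(original, paraphrased)
orig_emotion, para_emotion, positive_shift = compute_emotion_shift(original, paraphrased)
empathy = compute_empathy_score(paraphrased)

# The similarity value and emotion labels depend on the downloaded models;
# the empathy score is deterministic (only 'please' matches here, so 0.33).
print(f"Semantic similarity: {similarity}")
print(f"Emotion shift: {orig_emotion} -> {para_emotion} (positive shift: {positive_shift})")
print(f"Empathy score: {empathy}")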