Update metrics.py
metrics.py  CHANGED  (+0 -47)
@@ -21,35 +21,6 @@ def compute_semantic_similarity(original, paraphrased):
         print(f"Error computing semantic similarity: {str(e)}")
         return None
 
-def compute_emotion_shift(original, paraphrased):
-    """
-    Compute the emotion shift between the original and paraphrased comment.
-    Returns the original emotion, paraphrased emotion, and whether the shift is positive.
-    """
-    try:
-        emotion_classifier = metrics_models.load_emotion_classifier()
-        original_emotions = emotion_classifier(original)
-        paraphrased_emotions = emotion_classifier(paraphrased)
-
-        # Get the top emotion for each
-        original_emotion = max(original_emotions[0], key=lambda x: x['score'])['label']
-        paraphrased_emotion = max(paraphrased_emotions[0], key=lambda x: x['score'])['label']
-
-        # Define negative and positive emotions
-        negative_emotions = ['anger', 'sadness', 'fear']
-        positive_emotions = ['joy', 'love', 'surprise']
-
-        # Determine if the shift is positive
-        emotion_shift_positive = (
-            (original_emotion in negative_emotions and paraphrased_emotion in positive_emotions) or
-            (original_emotion in negative_emotions and paraphrased_emotion not in negative_emotions)
-        )
-
-        return original_emotion, paraphrased_emotion, emotion_shift_positive
-    except Exception as e:
-        print(f"Error computing emotion shift: {str(e)}")
-        return None, None, None
-
 def compute_empathy_score(paraphrased):
     """
     Compute an empathy score for the paraphrased comment (placeholder).
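Note on the removed compute_emotion_shift: the function can be exercised without the real model. The sketch below is hypothetical; it stubs whatever metrics_models.load_emotion_classifier() returned (the [0] indexing in the removed code suggests a Hugging Face-style classifier yielding one list of {'label', 'score'} dicts per input). It also drops the first disjunct of the original condition, which was redundant: a paraphrase whose top emotion lands in positive_emotions is already "not in negative_emotions".

# Hypothetical stand-in for the classifier returned by
# metrics_models.load_emotion_classifier(); the output shape is assumed
# from the removed code's use of original_emotions[0].
def stub_emotion_classifier(text):
    scores = ({'anger': 0.7, 'joy': 0.2, 'sadness': 0.1} if 'hate' in text
              else {'anger': 0.1, 'joy': 0.8, 'sadness': 0.1})
    return [[{'label': label, 'score': score} for label, score in scores.items()]]

def compute_emotion_shift(original, paraphrased,
                          emotion_classifier=stub_emotion_classifier):
    original_emotions = emotion_classifier(original)
    paraphrased_emotions = emotion_classifier(paraphrased)

    # Top emotion = highest-scoring label for each text.
    original_emotion = max(original_emotions[0], key=lambda x: x['score'])['label']
    paraphrased_emotion = max(paraphrased_emotions[0], key=lambda x: x['score'])['label']

    negative_emotions = ['anger', 'sadness', 'fear']

    # Positive shift: the original read as negative and the paraphrase no
    # longer does (this single test subsumes both disjuncts of the removed code).
    emotion_shift_positive = (original_emotion in negative_emotions
                              and paraphrased_emotion not in negative_emotions)
    return original_emotion, paraphrased_emotion, emotion_shift_positive

print(compute_emotion_shift("I hate this update", "This update could be better"))
# -> ('anger', 'joy', True)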
@@ -95,22 +66,4 @@ def compute_rouge_score(original, paraphrased):
         }
     except Exception as e:
         print(f"Error computing ROUGE scores: {str(e)}")
-        return None
-
-def compute_entailment_score(original, paraphrased):
-    """
-    Compute the entailment score to check factual consistency using an NLI model.
-    Returns a score between 0 and 1.
-    """
-    try:
-        nli_classifier = metrics_models.load_nli_classifier()
-        result = nli_classifier(
-            original,
-            paraphrased,
-            candidate_labels=["entailment", "contradiction", "neutral"]
-        )
-        entailment_score = next(score for label, score in zip(result['labels'], result['scores']) if label == "entailment")
-        return round(entailment_score, 2)
-    except Exception as e:
-        print(f"Error computing entailment score: {str(e)}")
         return None
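Note on the removed compute_entailment_score: its result handling can be sketched the same way. The stub below is hypothetical; it mimics the {'labels': [...], 'scores': [...]} dict that zero-shot-classification-style pipelines return, which the removed code's zip over result['labels'] and result['scores'] implies. How metrics_models.load_nli_classifier() actually mapped the (original, paraphrased) pair onto the underlying NLI model is not visible in this diff.

# Hypothetical stand-in for the classifier returned by
# metrics_models.load_nli_classifier(); the returned dict shape is assumed
# from the removed code's zip over result['labels'] and result['scores'].
def stub_nli_classifier(premise, hypothesis, candidate_labels):
    return {'labels': ['entailment', 'neutral', 'contradiction'],
            'scores': [0.91, 0.07, 0.02]}

def compute_entailment_score(original, paraphrased,
                             nli_classifier=stub_nli_classifier):
    result = nli_classifier(
        original,
        paraphrased,
        candidate_labels=["entailment", "contradiction", "neutral"],
    )
    # Pull out the score paired with the "entailment" label.
    entailment_score = next(score for label, score
                            in zip(result['labels'], result['scores'])
                            if label == "entailment")
    return round(entailment_score, 2)

print(compute_entailment_score("The cat sat on the mat.",
                               "A cat was sitting on a mat."))
# -> 0.91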