JanviMl committed
Commit c91906e · verified · 1 Parent(s): 829572e

Update classifier.py

Files changed (1)
  1. classifier.py +17 -4
classifier.py CHANGED
@@ -2,15 +2,16 @@
 import torch
 from model_loader import classifier_model, classifier_tokenizer
 from paraphraser import paraphrase_comment
+from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score
 
 def classify_toxic_comment(comment):
     """
     Classify a comment as toxic or non-toxic using the fine-tuned XLM-RoBERTa model.
-    If toxic, paraphrase the comment and re-evaluate.
+    If toxic, paraphrase the comment, re-evaluate, and compute additional Stage 3 metrics.
     Returns the prediction label, confidence, color, toxicity score, bias score, paraphrased comment (if applicable), and its metrics.
     """
     if not comment.strip():
-        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None
+        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None, None
 
     # Tokenize the input comment
     inputs = classifier_tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
@@ -34,13 +35,18 @@ def classify_toxic_comment(comment):
     bias_score = 0.01 if label == "Non-Toxic" else 0.15
     bias_score = round(bias_score, 2)
 
-    # If the comment is toxic, paraphrase it
+    # If the comment is toxic, paraphrase it and compute additional metrics
     paraphrased_comment = None
     paraphrased_prediction = None
     paraphrased_confidence = None
     paraphrased_color = None
     paraphrased_toxicity_score = None
     paraphrased_bias_score = None
+    semantic_similarity = None
+    original_emotion = None
+    paraphrased_emotion = None
+    emotion_shift_positive = None
+    empathy_score = None
 
     if label == "Toxic":
         # Paraphrase the comment
@@ -61,8 +67,15 @@ def classify_toxic_comment(comment):
         paraphrased_bias_score = 0.01 if paraphrased_label == "Non-Toxic" else 0.15  # Placeholder
         paraphrased_bias_score = round(paraphrased_bias_score, 2)
 
+        # Compute additional Stage 3 metrics
+        semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
+        original_emotion, paraphrased_emotion, emotion_shift_positive = compute_emotion_shift(comment, paraphrased_comment)
+        empathy_score = compute_empathy_score(paraphrased_comment)
+
     return (
         f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
         paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
-        paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score
+        paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
+        semantic_similarity, f"Original: {original_emotion}, Paraphrased: {paraphrased_emotion}, Positive Shift: {emotion_shift_positive}" if original_emotion else None,
+        empathy_score
     )
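
The new import assumes a `metrics` module alongside `classifier.py` that exposes `compute_semantic_similarity`, `compute_emotion_shift`, and `compute_empathy_score`; that file is not part of this commit. The sketch below is only one plausible shape for those helpers, inferred from the call sites above: the embedding model, emotion model, label set, and empathy heuristic are all assumptions, not the repository's actual implementation.

```python
# Hypothetical metrics.py sketch -- model names and heuristics are illustrative assumptions.
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline

_similarity_model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed embedding model
_emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",   # assumed emotion model
)

def compute_semantic_similarity(original: str, paraphrased: str) -> float:
    """Cosine similarity between sentence embeddings of the two comments."""
    embeddings = _similarity_model.encode([original, paraphrased], convert_to_tensor=True)
    return round(util.cos_sim(embeddings[0], embeddings[1]).item(), 2)

def compute_emotion_shift(original: str, paraphrased: str):
    """Return (original_emotion, paraphrased_emotion, shift_is_positive)."""
    original_emotion = _emotion_classifier(original)[0]["label"]
    paraphrased_emotion = _emotion_classifier(paraphrased)[0]["label"]
    negative = {"anger", "disgust", "fear", "sadness"}
    shift_is_positive = original_emotion in negative and paraphrased_emotion not in negative
    return original_emotion, paraphrased_emotion, shift_is_positive

def compute_empathy_score(paraphrased: str) -> float:
    """Placeholder heuristic; a real scorer would likely use a trained model."""
    markers = ("sorry", "understand", "appreciate", "please", "thank")
    hits = sum(marker in paraphrased.lower() for marker in markers)
    return round(min(1.0, 0.5 + 0.1 * hits), 2)
```

With this change the return tuple grows to 14 elements (matching the widened error return). A hypothetical caller would unpack it in the order of the return statement above; the variable names on the left are illustrative:

```python
(
    prediction, confidence, label_color, toxicity_score, bias_score,
    paraphrased_comment, paraphrased_prediction,
    paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
    semantic_similarity, emotion_shift_summary, empathy_score,
) = classify_toxic_comment("You are such an idiot!")

print(prediction)            # e.g. "Prediction: Toxic"
print(semantic_similarity)   # e.g. 0.87; None when the comment was non-toxic
print(emotion_shift_summary) # e.g. "Original: anger, Paraphrased: neutral, Positive Shift: True"
```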