JanviMl committed (verified)
Commit ad0b71a · 1 Parent(s): fb34cdf

Update classifier.py

Files changed (1): classifier.py (+7, -14)
classifier.py CHANGED
@@ -1,17 +1,17 @@
 # classifier.py
 import torch
-from model_loader import classifier_model  # Updated import
+from model_loader import classifier_model
 from paraphraser import paraphrase_comment
-from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score, compute_bleu_score, compute_rouge_score, compute_entailment_score
+from metrics import compute_semantic_similarity, compute_empathy_score, compute_bleu_score, compute_rouge_score
 
 def classify_toxic_comment(comment):
     """
     Classify a comment as toxic or non-toxic using the fine-tuned XLM-RoBERTa model.
-    If toxic, paraphrase the comment, re-evaluate, and compute additional Stage 3 metrics.
+    If toxic, paraphrase the comment, re-evaluate, and compute essential metrics.
     Returns the prediction label, confidence, color, toxicity score, bias score, paraphrased comment (if applicable), and its metrics.
     """
     if not comment.strip():
-        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
+        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None
 
     # Access the model and tokenizer
     model = classifier_model.model
@@ -39,7 +39,7 @@ def classify_toxic_comment(comment):
     bias_score = 0.01 if label == "Non-Toxic" else 0.15
     bias_score = round(bias_score, 2)
 
-    # If the comment is toxic, paraphrase it and compute additional metrics
+    # If the comment is toxic, paraphrase it and compute essential metrics
     paraphrased_comment = None
     paraphrased_prediction = None
     paraphrased_confidence = None
@@ -47,13 +47,9 @@
     paraphrased_toxicity_score = None
     paraphrased_bias_score = None
     semantic_similarity = None
-    original_emotion = None
-    paraphrased_emotion = None
-    emotion_shift_positive = None
     empathy_score = None
     bleu_score = None
     rouge_scores = None
-    entailment_score = None
 
     if label == "Toxic":
         # Paraphrase the comment
@@ -74,18 +70,15 @@
         paraphrased_bias_score = 0.01 if paraphrased_label == "Non-Toxic" else 0.15  # Placeholder
         paraphrased_bias_score = round(paraphrased_bias_score, 2)
 
-        # Compute additional Stage 3 metrics
+        # Compute essential metrics
         semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
-        original_emotion, paraphrased_emotion, emotion_shift_positive = compute_emotion_shift(comment, paraphrased_comment)
         empathy_score = compute_empathy_score(paraphrased_comment)
         bleu_score = compute_bleu_score(comment, paraphrased_comment)
         rouge_scores = compute_rouge_score(comment, paraphrased_comment)
-        entailment_score = compute_entailment_score(comment, paraphrased_comment)
 
     return (
         f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
         paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
         paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
-        semantic_similarity, f"Original: {original_emotion}, Paraphrased: {paraphrased_emotion}, Positive Shift: {emotion_shift_positive}" if original_emotion else None,
-        empathy_score, bleu_score, rouge_scores, entailment_score
+        semantic_similarity, empathy_score, bleu_score, rouge_scores
     )
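
A minimal usage sketch (not part of this commit) of how a caller might consume the trimmed return tuple after this change; the sample comment and the positional unpacking are assumptions based only on the return statement above, and they assume the repo's modules (model_loader, paraphraser, metrics) are importable.

# Hypothetical usage sketch, assuming classifier.py and its dependencies are on the path.
from classifier import classify_toxic_comment

result = classify_toxic_comment("This is a sample toxic comment.")

# The first five slots are the classifier outputs for the original comment
# (or an error message plus Nones when the input is empty).
prediction, confidence, label_color, toxicity_score, bias_score = result[:5]
print(prediction, confidence, toxicity_score, bias_score)

# On the toxic path the last four slots are the retained metrics:
# semantic_similarity, empathy_score, bleu_score, rouge_scores.
if prediction == "Prediction: Toxic":
    semantic_similarity, empathy_score, bleu_score, rouge_scores = result[-4:]
    print(semantic_similarity, empathy_score, bleu_score, rouge_scores)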