JanviMl committed on
Commit
9d22dca
·
verified ·
1 Parent(s): e5f485f

Update classifier.py

Browse files
Files changed (1) hide show
  1. classifier.py +2 -4
classifier.py CHANGED
@@ -2,7 +2,7 @@
2
  import torch
3
  from model_loader import classifier_model
4
  from paraphraser import paraphrase_comment
5
- from metrics import compute_semantic_similarity, compute_empathy_score, compute_bleu_score, compute_rouge_score
6
 
7
  def classify_toxic_comment(comment):
8
  """
@@ -48,7 +48,6 @@ def classify_toxic_comment(comment):
48
  paraphrased_bias_score = None
49
  semantic_similarity = None
50
  empathy_score = None
51
- bleu_score = None
52
  rouge_scores = None
53
 
54
  if label == "Toxic":
@@ -73,12 +72,11 @@ def classify_toxic_comment(comment):
73
  # Compute essential metrics
74
  semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
75
  empathy_score = compute_empathy_score(paraphrased_comment)
76
- bleu_score = compute_bleu_score(comment, paraphrased_comment)
77
  rouge_scores = compute_rouge_score(comment, paraphrased_comment)
78
 
79
  return (
80
  f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
81
  paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
82
  paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
83
- semantic_similarity, empathy_score, bleu_score, rouge_scores
84
  )
 
2
  import torch
3
  from model_loader import classifier_model
4
  from paraphraser import paraphrase_comment
5
+ from metrics import compute_semantic_similarity, compute_empathy_score, compute_rouge_score
6
 
7
  def classify_toxic_comment(comment):
8
  """
 
48
  paraphrased_bias_score = None
49
  semantic_similarity = None
50
  empathy_score = None
 
51
  rouge_scores = None
52
 
53
  if label == "Toxic":
 
72
  # Compute essential metrics
73
  semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
74
  empathy_score = compute_empathy_score(paraphrased_comment)
 
75
  rouge_scores = compute_rouge_score(comment, paraphrased_comment)
76
 
77
  return (
78
  f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
79
  paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
80
  paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
81
+ semantic_similarity, empathy_score, rouge_scores
82
  )