JanviMl committed
Commit 106e766 · verified · 1 Parent(s): 0553ede

Update classifier.py

Files changed (1)
  1. classifier.py +18 -8
classifier.py CHANGED
@@ -1,8 +1,8 @@
 # classifier.py
 import torch
-from model_loader import classifier_model, classifier_tokenizer
+from model.classifier import classifier_model
 from paraphraser import paraphrase_comment
-from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score
+from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score, compute_bleu_score, compute_rouge_score, compute_entailment_score
 
 def classify_toxic_comment(comment):
     """
@@ -11,14 +11,18 @@ def classify_toxic_comment(comment):
     Returns the prediction label, confidence, color, toxicity score, bias score, paraphrased comment (if applicable), and its metrics.
     """
     if not comment.strip():
-        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None, None
+        return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
+
+    # Access the model and tokenizer
+    model = classifier_model.model
+    tokenizer = classifier_model.tokenizer
 
     # Tokenize the input comment
-    inputs = classifier_tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
+    inputs = tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
 
     # Run inference
     with torch.no_grad():
-        outputs = classifier_model(**inputs)
+        outputs = model(**inputs)
         logits = outputs.logits
 
     # Get the predicted class (0 = non-toxic, 1 = toxic)
@@ -47,15 +51,18 @@ def classify_toxic_comment(comment):
     paraphrased_emotion = None
     emotion_shift_positive = None
     empathy_score = None
+    bleu_score = None
+    rouge_scores = None
+    entailment_score = None
 
     if label == "Toxic":
         # Paraphrase the comment
         paraphrased_comment = paraphrase_comment(comment)
 
         # Re-evaluate the paraphrased comment
-        paraphrased_inputs = classifier_tokenizer(paraphrased_comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
+        paraphrased_inputs = tokenizer(paraphrased_comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
         with torch.no_grad():
-            paraphrased_outputs = classifier_model(**paraphrased_inputs)
+            paraphrased_outputs = model(**paraphrased_inputs)
             paraphrased_logits = paraphrased_outputs.logits
 
         paraphrased_predicted_class = torch.argmax(paraphrased_logits, dim=1).item()
@@ -71,11 +78,14 @@ def classify_toxic_comment(comment):
         semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
         original_emotion, paraphrased_emotion, emotion_shift_positive = compute_emotion_shift(comment, paraphrased_comment)
         empathy_score = compute_empathy_score(paraphrased_comment)
+        bleu_score = compute_bleu_score(comment, paraphrased_comment)
+        rouge_scores = compute_rouge_score(comment, paraphrased_comment)
+        entailment_score = compute_entailment_score(comment, paraphrased_comment)
 
     return (
         f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
         paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
         paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
         semantic_similarity, f"Original: {original_emotion}, Paraphrased: {paraphrased_emotion}, Positive Shift: {emotion_shift_positive}" if original_emotion else None,
-        empathy_score
+        empathy_score, bleu_score, rouge_scores, entailment_score
     )
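
The new import path assumes a model/classifier.py module exposing a classifier_model object with .model and .tokenizer attributes. That module is not part of this diff, so the following is only a minimal sketch of the shape the call sites imply; the class name and checkpoint are illustrative placeholders, not the repo's actual code.

# model/classifier.py — hypothetical sketch, inferred from this commit's call sites
from transformers import AutoModelForSequenceClassification, AutoTokenizer

class ClassifierModel:
    def __init__(self, model_name="unitary/toxic-bert"):  # checkpoint is a placeholder assumption
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
        self.model.eval()  # inference only; classify_toxic_comment wraps calls in torch.no_grad()

classifier_model = ClassifierModel()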
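Likewise, compute_bleu_score, compute_rouge_score, and compute_entailment_score are only imported here; their signatures are inferred from the call sites above. A hedged sketch of what such helpers in metrics.py might look like, using NLTK, the rouge_score package, and an MNLI model (all three implementation choices are assumptions, not the repo's code):

# metrics.py — hypothetical sketch of the three new helpers
import torch
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from rouge_score import rouge_scorer
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def compute_bleu_score(original, paraphrase):
    # Sentence-level BLEU with smoothing so short comments don't collapse to 0
    return sentence_bleu([original.split()], paraphrase.split(),
                         smoothing_function=SmoothingFunction().method1)

def compute_rouge_score(original, paraphrase):
    # Dict of ROUGE-1 / ROUGE-L F1 scores between original and paraphrase
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
    scores = scorer.score(original, paraphrase)
    return {name: round(score.fmeasure, 4) for name, score in scores.items()}

_nli_tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
_nli_model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")

def compute_entailment_score(original, paraphrase):
    # Probability that the paraphrase is entailed by the original comment
    inputs = _nli_tokenizer(original, paraphrase, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = _nli_model(**inputs).logits
    probs = torch.softmax(logits, dim=1)[0]
    return probs[2].item()  # index 2 = "entailment" for roberta-large-mnli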
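Note for callers: the error return grows from 14 to 17 values, and the success tuple gains bleu_score, rouge_scores, and entailment_score at the end, so any consumer (e.g. a Gradio outputs list, if that is what drives this Space) must now unpack 17 values. A hypothetical call site:

# Hypothetical caller — variable names are illustrative, only the arity is fixed by the diff
(
    prediction, confidence, label_color, toxicity_score, bias_score,
    paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
    paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
    semantic_similarity, emotion_shift, empathy_score,
    bleu_score, rouge_scores, entailment_score,
) = classify_toxic_comment("example comment")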