JanviMl committed (verified)
Commit d6b5249 · 1 Parent(s): 0625fce

Update classifier.py

Files changed (1)
  1. classifier.py +13 -3
classifier.py CHANGED
@@ -5,10 +5,10 @@ from model_loader import model, tokenizer
 def classify_toxic_comment(comment):
     """
     Classify a comment as toxic or non-toxic using the fine-tuned XLM-RoBERTa model.
-    Returns the prediction label, confidence, and color for UI display.
+    Returns the prediction label, confidence, color, toxicity score, and bias score for UI display.
     """
     if not comment.strip():
-        return "Error: Please enter a comment.", None, None
+        return "Error: Please enter a comment.", None, None, None, None
 
     # Tokenize the input comment
     inputs = tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
@@ -24,4 +24,14 @@ def classify_toxic_comment(comment):
     confidence = torch.softmax(logits, dim=1)[0][predicted_class].item()
     label_color = "red" if label == "Toxic" else "green"
 
-    return f"Prediction: {label}", confidence, label_color
+    # Simulate Toxicity Score (in a real scenario, use a model like Detoxify)
+    # For now, we'll approximate it based on the confidence of the toxic class
+    toxicity_score = torch.softmax(logits, dim=1)[0][1].item()  # Probability of toxic class
+    toxicity_score = round(toxicity_score, 2)
+
+    # Simulate Bias Score (in a real scenario, use a bias detection model like WEAT)
+    # For now, we'll use a placeholder value (since the example comment is non-toxic)
+    bias_score = 0.01 if label == "Non-Toxic" else 0.15  # Placeholder logic
+    bias_score = round(bias_score, 2)
+
+    return f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score
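Because the return value grows from three items to five, any caller that unpacks the old (label, confidence, color) tuple needs to be updated. Below is a minimal sketch of an updated call site; the sample comment and the `classifier` import path are illustrative assumptions, not part of this commit.

```python
# Hypothetical call site, assuming classify_toxic_comment is importable
# from classifier.py as shown in this diff.
from classifier import classify_toxic_comment

prediction, confidence, label_color, toxicity_score, bias_score = classify_toxic_comment(
    "Thanks, this explanation was really helpful!"  # illustrative input
)

if confidence is None:
    # Empty-input path: an error string followed by four None values
    print(prediction)
else:
    print(f"{prediction} (confidence={confidence:.2f}, color={label_color}, "
          f"toxicity={toxicity_score}, bias={bias_score})")
```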
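The new code calls torch.softmax(logits, dim=1) twice on the same logits, once for the confidence and once for the toxicity score. A possible follow-up, sketched below under the diff's assumption that index 1 is the toxic class, is to compute the probabilities once and derive both values from them.

```python
import torch

def scores_from_logits(logits: torch.Tensor, predicted_class: int) -> tuple[float, float]:
    # Compute the softmax once and reuse it for both values.
    probs = torch.softmax(logits, dim=1)[0]
    confidence = probs[predicted_class].item()
    toxicity_score = round(probs[1].item(), 2)  # index 1 = toxic class, as assumed in the diff
    return confidence, toxicity_score
```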
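The inline comments note that a real toxicity score would come from a model such as Detoxify rather than from this classifier's own softmax. A rough sketch of that substitution follows; it assumes the detoxify package is installed and that its predict() output exposes a "toxicity" probability, so treat it as illustrative rather than part of this commit.

```python
# Sketch only: swapping the simulated score for a Detoxify prediction.
# Assumes `pip install detoxify`; the "multilingual" checkpoint is chosen
# here to mirror XLM-RoBERTa's multilingual coverage.
from detoxify import Detoxify

_detoxify = Detoxify("multilingual")  # load once, like model_loader does for the classifier

def detoxify_toxicity_score(comment: str) -> float:
    scores = _detoxify.predict(comment)          # dict of per-category probabilities
    return round(float(scores["toxicity"]), 2)   # keep the same rounding as the diff
```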