# classifier.py
import torch

from model_loader import model, tokenizer


def classify_toxic_comment(comment):
    """
    Classify a comment as toxic or non-toxic using the fine-tuned XLM-RoBERTa model.
    Returns the prediction label, confidence, and color for UI display.
    """
    if not comment.strip():
        return "Error: Please enter a comment.", None, None

    # Tokenize the input comment
    inputs = tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)

    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Get the predicted class (0 = non-toxic, 1 = toxic) and its softmax confidence
    predicted_class = torch.argmax(logits, dim=1).item()
    label = "Toxic" if predicted_class == 1 else "Non-Toxic"
    confidence = torch.softmax(logits, dim=1)[0][predicted_class].item()
    label_color = "red" if label == "Toxic" else "green"

    return f"Prediction: {label}", confidence, label_color
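For reference, a minimal usage sketch. It assumes that model_loader.py exposes a tokenizer and a sequence-classification model loaded from the fine-tuned XLM-RoBERTa checkpoint; the checkpoint identifier shown in the comments is a placeholder, not this Space's actual model.

# usage_example.py (sketch, assumptions noted above)
#
# model_loader.py is assumed to look roughly like:
#   from transformers import AutoTokenizer, AutoModelForSequenceClassification
#   MODEL_NAME = "your-username/xlm-roberta-toxic-classifier"  # placeholder id
#   tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
#   model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
#   model.eval()

from classifier import classify_toxic_comment

# The function returns (label string, confidence float, color string).
label, confidence, color = classify_toxic_comment("You are wonderful!")
print(label)                       # e.g. "Prediction: Non-Toxic"
print(f"{confidence:.2%}", color)  # e.g. "98.73% green"

# An empty or whitespace-only comment yields an error message with None values.
error, confidence, color = classify_toxic_comment("   ")
print(error, confidence, color)    # "Error: Please enter a comment. None None"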