JanviMl committed · Commit 444a6ca · verified · 1 Parent(s): ab6d0c4

Update paraphraser.py

Files changed (1): paraphraser.py (+86 -43)
paraphraser.py CHANGED
@@ -1,48 +1,91 @@
- # paraphraser.py
+ # classifier.py
  import torch
- from model_loader import paraphrase_model, paraphrase_tokenizer
+ from model_loader import classifier_model
+ from paraphraser import paraphrase_comment
+ from metrics import compute_semantic_similarity, compute_emotion_shift, compute_empathy_score, compute_bleu_score, compute_rouge_score, compute_entailment_score

- def paraphrase_comment(comment):
+ def classify_toxic_comment(comment):
      """
-     Paraphrase a toxic comment using the Granite 3.2-2B-Instruct model.
-     Returns the paraphrased comment.
+     Classify a comment as toxic or non-toxic using the fine-tuned XLM-RoBERTa model.
+     If toxic, paraphrase the comment, re-evaluate, and compute additional Stage 3 metrics.
+     Returns the prediction label, confidence, color, toxicity score, bias score, paraphrased comment (if applicable), and its metrics.
      """
-     # Define the paraphrasing prompt with system instruction, guidelines, examples, and the task
-     prompt = (
-         "You are a content moderator tasked with rewriting toxic comments into neutral and constructive ones while maintaining the original meaning.\n"
-         "Guidelines:\n"
-         "- Remove explicit hate speech, personal attacks, or offensive language.\n"
-         "- Keep the response neutral and professional.\n"
-         "- Ensure the rewritten comment retains the original intent but in a constructive tone.\n"
-         "Examples:\n"
-         "Toxic: \"You're so dumb! You never understand anything!\"\n"
-         "Neutral: \"I think there's some misunderstanding. Let's clarify things.\"\n"
-         "Toxic: \"This is the worst idea ever. Only an idiot would suggest this.\"\n"
-         "Neutral: \"I don't think this idea works well. Maybe we can explore other options.\"\n"
-         "Now, rewrite this comment: \"{comment}\""
-     )
-
-     # Format the prompt with the input comment
-     prompt = prompt.format(comment=comment)
-
-     # Tokenize the prompt
-     inputs = paraphrase_tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
-
-     # Generate the paraphrased output
+     if not comment.strip():
+         return "Error: Please enter a comment.", None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
+
+     # Access the model and tokenizer
+     model = classifier_model.model
+     tokenizer = classifier_model.tokenizer
+
+     # Tokenize the input comment
+     inputs = tokenizer(comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
+
+     # Run inference
      with torch.no_grad():
-         outputs = paraphrase_model.generate(
-             **inputs,
-             max_length=512,
-             num_return_sequences=1,
-             do_sample=True,
-             top_p=0.95,
-             temperature=0.7
-         )
-
-     # Decode the generated output
-     paraphrased_comment = paraphrase_tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-     # Remove the prompt part from the output (if the model includes it)
-     paraphrased_comment = paraphrased_comment.replace(prompt, "").strip()
-
-     return paraphrased_comment
+         outputs = model(**inputs)
+         logits = outputs.logits
+
+     # Get the predicted class (0 = non-toxic, 1 = toxic)
+     predicted_class = torch.argmax(logits, dim=1).item()
+     label = "Toxic" if predicted_class == 1 else "Non-Toxic"
+     confidence = torch.softmax(logits, dim=1)[0][predicted_class].item()
+     label_color = "red" if label == "Toxic" else "green"
+
+     # Compute Toxicity Score (approximated as the probability of the toxic class)
+     toxicity_score = torch.softmax(logits, dim=1)[0][1].item()
+     toxicity_score = round(toxicity_score, 2)
+
+     # Simulate Bias Score (placeholder)
+     bias_score = 0.01 if label == "Non-Toxic" else 0.15
+     bias_score = round(bias_score, 2)
+
+     # If the comment is toxic, paraphrase it and compute additional metrics
+     paraphrased_comment = None
+     paraphrased_prediction = None
+     paraphrased_confidence = None
+     paraphrased_color = None
+     paraphrased_toxicity_score = None
+     paraphrased_bias_score = None
+     semantic_similarity = None
+     original_emotion = None
+     paraphrased_emotion = None
+     emotion_shift_positive = None
+     empathy_score = None
+     bleu_score = None
+     rouge_scores = None
+     entailment_score = None
+
+     if label == "Toxic":
+         # Paraphrase the comment
+         paraphrased_comment = paraphrase_comment(comment)
+
+         # Re-evaluate the paraphrased comment
+         paraphrased_inputs = tokenizer(paraphrased_comment, return_tensors="pt", truncation=True, padding=True, max_length=512)
+         with torch.no_grad():
+             paraphrased_outputs = model(**paraphrased_inputs)
+             paraphrased_logits = paraphrased_outputs.logits
+
+         paraphrased_predicted_class = torch.argmax(paraphrased_logits, dim=1).item()
+         paraphrased_label = "Toxic" if paraphrased_predicted_class == 1 else "Non-Toxic"
+         paraphrased_confidence = torch.softmax(paraphrased_logits, dim=1)[0][paraphrased_predicted_class].item()
+         paraphrased_color = "red" if paraphrased_label == "Toxic" else "green"
+         paraphrased_toxicity_score = torch.softmax(paraphrased_logits, dim=1)[0][1].item()
+         paraphrased_toxicity_score = round(paraphrased_toxicity_score, 2)
+         paraphrased_bias_score = 0.01 if paraphrased_label == "Non-Toxic" else 0.15  # Placeholder
+         paraphrased_bias_score = round(paraphrased_bias_score, 2)
+
+         # Compute additional Stage 3 metrics
+         semantic_similarity = compute_semantic_similarity(comment, paraphrased_comment)
+         original_emotion, paraphrased_emotion, emotion_shift_positive = compute_emotion_shift(comment, paraphrased_comment)
+         empathy_score = compute_empathy_score(paraphrased_comment)
+         bleu_score = compute_bleu_score(comment, paraphrased_comment)
+         rouge_scores = compute_rouge_score(comment, paraphrased_comment)
+         entailment_score = compute_entailment_score(comment, paraphrased_comment)
+
+     return (
+         f"Prediction: {label}", confidence, label_color, toxicity_score, bias_score,
+         paraphrased_comment, f"Prediction: {paraphrased_label}" if paraphrased_comment else None,
+         paraphrased_confidence, paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
+         semantic_similarity, f"Original: {original_emotion}, Paraphrased: {paraphrased_emotion}, Positive Shift: {emotion_shift_positive}" if original_emotion else None,
+         empathy_score, bleu_score, rouge_scores, entailment_score
+     )
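
For context, here is a minimal sketch of how the updated function might be called from an app or a quick test script. It assumes the module layout implied by the diff (classifier.py importable alongside model_loader.py, paraphraser.py, and metrics.py); the call site itself is hypothetical and not part of this commit. It mainly illustrates the 17-value return tuple that callers need to unpack.

from classifier import classify_toxic_comment  # module updated in this commit (assumed importable)

# Unpack all 17 outputs in the order the function returns them.
(
    prediction, confidence, label_color, toxicity_score, bias_score,
    paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
    paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
    semantic_similarity, emotion_shift_summary, empathy_score,
    bleu_score, rouge_scores, entailment_score,
) = classify_toxic_comment("This is the worst idea ever. Only an idiot would suggest this.")

print(prediction, confidence, toxicity_score, bias_score)

# The paraphrase-related fields are None when the comment is classified as Non-Toxic.
if paraphrased_comment is not None:
    print("Paraphrase:", paraphrased_comment)
    print("Re-evaluated:", paraphrased_prediction, paraphrased_toxicity_score)
    print("Similarity:", semantic_similarity, "BLEU:", bleu_score, "Entailment:", entailment_score)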