Spaces:
Sleeping
Sleeping
Update paraphraser.py
Browse files- paraphraser.py +17 -2
paraphraser.py
CHANGED
@@ -2,13 +2,28 @@
|
|
2 |
import torch
|
3 |
from model_loader import paraphrase_model, paraphrase_tokenizer
|
4 |
|
5 |
-
def paraphrase_comment(comment):
|
6 |
"""
|
7 |
Paraphrase a toxic comment using the Granite 3.2-2B-Instruct model.
|
8 |
Returns the paraphrased comment.
|
9 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
# Format the prompt with the input comment
|
11 |
-
prompt =
|
12 |
|
13 |
# Tokenize the prompt
|
14 |
inputs = paraphrase_tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
|
|
|
2 |
import torch
|
3 |
from model_loader import paraphrase_model, paraphrase_tokenizer
|
4 |
|
5 |
+
def paraphrase_comment(comment):
|
6 |
"""
|
7 |
Paraphrase a toxic comment using the Granite 3.2-2B-Instruct model.
|
8 |
Returns the paraphrased comment.
|
9 |
"""
|
10 |
+
# Define the paraphrasing prompt with system instruction, guidelines, examples, and the task
|
11 |
+
prompt = (
|
12 |
+
"You are a content moderator tasked with rewriting toxic comments into neutral and constructive ones while maintaining the original meaning.\n"
|
13 |
+
"Guidelines:\n"
|
14 |
+
"- Remove explicit hate speech, personal attacks, or offensive language.\n"
|
15 |
+
"- Keep the response neutral and professional.\n"
|
16 |
+
"- Ensure the rewritten comment retains the original intent but in a constructive tone.\n"
|
17 |
+
"Examples:\n"
|
18 |
+
"Toxic: \"You're so dumb! You never understand anything!\"\n"
|
19 |
+
"Neutral: \"I think there's some misunderstanding. Let's clarify things.\"\n"
|
20 |
+
"Toxic: \"This is the worst idea ever. Only an idiot would suggest this.\"\n"
|
21 |
+
"Neutral: \"I don't think this idea works well. Maybe we can explore other options.\"\n"
|
22 |
+
"Now, rewrite this comment: \"{comment}\""
|
23 |
+
)
|
24 |
+
|
25 |
# Format the prompt with the input comment
|
26 |
+
prompt = prompt.format(comment=comment)
|
27 |
|
28 |
# Tokenize the prompt
|
29 |
inputs = paraphrase_tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
|