# paraphraser.py
from model_loader import paraphraser_model
def paraphrase_comment(comment):
    """
    Paraphrase a toxic comment using the Granite 3.2-2B-Instruct model.
    Returns the paraphrased comment, or None if the input comment is empty.
    """
    if not comment:
        return None
    try:
        model = paraphraser_model.model
        tokenizer = paraphraser_model.tokenizer
        # Create a detailed prompt with guidelines and examples
        prompt = (
            "You are a content moderator tasked with rewriting toxic comments into neutral and constructive ones while maintaining the original meaning. "
            "Follow these guidelines:\n"
            "- Remove explicit hate speech, personal attacks, or offensive language.\n"
            "- Keep the response neutral and professional.\n"
            "- Ensure the rewritten comment retains the original intent but in a constructive tone.\n\n"
            "Examples:\n"
            "Toxic: \"You're so dumb! You never understand anything!\"\n"
            "Neutral: \"I think there's some misunderstanding. Let's clarify things.\"\n"
            "Toxic: \"This is the worst idea ever. Only an idiot would suggest this.\"\n"
            "Neutral: \"I don't think this idea works well. Maybe we can explore other options.\"\n\n"
            f"Now, rewrite this comment: \"{comment}\""
        )
        # Tokenize the prompt, truncating overly long inputs to the 512-token budget
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
        # Generate the paraphrased comment; max_new_tokens bounds only the generated text,
        # so a long prompt does not eat into the output budget (as max_length would)
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            num_return_sequences=1,
            temperature=0.7,
            top_p=0.9,
            do_sample=True
        )
        # Decode only the newly generated tokens so the prompt is not echoed in the output
        prompt_length = inputs["input_ids"].shape[1]
        paraphrased_comment = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True).strip()
        return paraphrased_comment
    except Exception as e:
        return f"Error paraphrasing comment: {str(e)}"