# NOTE: the lines below originally read "Spaces: / Sleeping / Sleeping" —
# Hugging Face Spaces page chrome captured during extraction, not Python
# source. Kept here as a comment so the file parses.
# app.py — Gradio front-end for the toxic-comment classifier.

# Third-party UI framework.
import gradio as gr

# Local model wrapper: classify_toxic_comment(text) -> (label, confidence, color).
from classifier import classify_toxic_comment
# Reset handler wired to the Clear button.
def clear_inputs():
    """Return blank defaults for the four cleared widgets.

    Order matches the outputs list of clear_btn.click:
    comment text, confidence slider value, label HTML, prediction history.
    """
    blank = ""
    return blank, 0, blank, []
# Custom CSS applied to the Gradio widgets (buttons, textbox, slider).
# NOTE(fix): the previous literal contained stray " | |" residue on every
# line — web-extraction artifacts that would have shipped inside the CSS.
custom_css = """
.gr-button-primary {
    background-color: #4CAF50 !important;
    color: white !important;
}
.gr-button-secondary {
    background-color: #f44336 !important;
    color: white !important;
}
.gr-textbox textarea {
    border: 2px solid #2196F3 !important;
    border-radius: 8px !important;
}
.gr-slider {
    background-color: #e0e0e0 !important;
    border-radius: 10px !important;
}
"""
# Main UI: build the Gradio Blocks interface and wire up event handlers.
with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
    gr.Markdown(
        """
        # Toxic Comment Classifier
        Enter a comment below to check if it's toxic or non-toxic. This app uses a fine-tuned XLM-RoBERTa model to classify comments as part of a four-stage pipeline for automated toxic comment moderation.
        """
    )

    # --- Input row: comment box plus action buttons ---
    with gr.Row():
        with gr.Column(scale=3):
            comment_input = gr.Textbox(
                label="Your Comment",
                placeholder="Type your comment here...",
                lines=3,
                max_lines=5
            )
        with gr.Column(scale=1):
            submit_btn = gr.Button("Classify Comment", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")

    gr.Examples(
        examples=[
            "I love this community, it's so supportive!",
            "You are an idiot and should leave this platform.",
            "This app is amazing, great work!"
        ],
        inputs=comment_input,
        label="Try these examples:"
    )

    # --- Output row: prediction text and (read-only) confidence slider ---
    with gr.Row():
        with gr.Column(scale=2):
            prediction_output = gr.Textbox(label="Prediction", placeholder="Prediction will appear here...")
        with gr.Column(scale=1):
            confidence_output = gr.Slider(
                label="Confidence",
                minimum=0,
                maximum=1,
                value=0,
                interactive=False
            )

    # Styled HTML banners: colored prediction label and confidence level.
    with gr.Row():
        label_display = gr.HTML()
        threshold_display = gr.HTML()

    with gr.Accordion("Prediction History", open=False):
        history_output = gr.JSON(label="Previous Predictions")

    with gr.Accordion("Provide Feedback", open=False):
        feedback_input = gr.Radio(
            choices=["Yes, the prediction was correct", "No, the prediction was incorrect"],
            label="Was this prediction correct?"
        )
        feedback_comment = gr.Textbox(label="Additional Comments (optional)", placeholder="Let us know your thoughts...")
        feedback_submit = gr.Button("Submit Feedback")
        feedback_output = gr.Textbox(label="Feedback Status")

    def handle_classification(comment, history):
        """Classify `comment`, append the result to `history`, and build the
        HTML fragments for the label and confidence-threshold banners.

        Returns (prediction, confidence, label_html, history, threshold_html),
        in the same order as the outputs list of submit_btn.click below.
        """
        if history is None:
            history = []
        prediction, confidence, color = classify_toxic_comment(comment)
        history.append({"comment": comment, "prediction": prediction, "confidence": confidence})
        # 0.7 is the cutoff separating high- from low-confidence styling.
        threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
        threshold_color = "green" if confidence >= 0.7 else "orange"
        label_html = f"<span style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"
        threshold_html = f"<span style='color: {threshold_color}; font-size: 16px;'>{threshold_message}</span>"
        return prediction, confidence, label_html, history, threshold_html

    def handle_feedback(feedback, comment):
        """Acknowledge user feedback (display only — nothing is persisted)."""
        return f"Thank you for your feedback: {feedback}\nAdditional comment: {comment}"

    # FIX vs. the previous wiring:
    #  * the loading-state lambda wrote None into history_output, wiping the
    #    accumulated history on every click before the handler could read it;
    #  * threshold_display was listed twice in the same outputs list;
    #  * the raw `color` value was round-tripped through the visible
    #    label_display component across a fragile chain of .then() callbacks.
    # The handler now returns finished HTML directly, so one .then() step
    # suffices and each component appears exactly once per outputs list.
    submit_btn.click(
        fn=lambda: ("Classifying...", 0, "", ""),  # transient loading state
        inputs=[],
        outputs=[prediction_output, confidence_output, label_display, threshold_display]
    ).then(
        fn=handle_classification,
        inputs=[comment_input, history_output],
        outputs=[prediction_output, confidence_output, label_display, history_output, threshold_display]
    )

    feedback_submit.click(
        fn=handle_feedback,
        inputs=[feedback_input, feedback_comment],
        outputs=feedback_output
    )

    # Clear resets the four widgets handled by clear_inputs, then a chained
    # step blanks the prediction box and threshold banner, which the original
    # four-output reset left stale.
    clear_btn.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[comment_input, confidence_output, label_display, history_output]
    ).then(
        fn=lambda: ("", ""),
        inputs=[],
        outputs=[prediction_output, threshold_display]
    )

    gr.Markdown(
        """
        ---
        **About**: This app is part of a four-stage pipeline for automated toxic comment moderation with emotional intelligence via RLHF. Built with ❤️ using Hugging Face and Gradio.
        """
    )

demo.launch()