JanviMl committed on
Commit c49cebe · verified · 1 parent: bd229ab

Create app.py

Files changed (1): app.py (+137, −0)
app.py ADDED
# app.py
import gradio as gr
from classifier import classify_toxic_comment

# Reset every input and output component to its initial state
def clear_inputs():
    return "", "", 0, "", [], ""

# Custom CSS for styling
custom_css = """
.gr-button-primary {
    background-color: #4CAF50 !important;
    color: white !important;
}
.gr-button-secondary {
    background-color: #f44336 !important;
    color: white !important;
}
.gr-textbox textarea {
    border: 2px solid #2196F3 !important;
    border-radius: 8px !important;
}
.gr-slider {
    background-color: #e0e0e0 !important;
    border-radius: 10px !important;
}
"""

# Main UI
with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
    gr.Markdown(
        """
        # Toxic Comment Classifier
        Enter a comment below to check whether it is toxic or non-toxic. This app uses a fine-tuned XLM-RoBERTa model to classify comments as part of a four-stage pipeline for automated toxic comment moderation.
        """
    )

    with gr.Row():
        with gr.Column(scale=3):
            comment_input = gr.Textbox(
                label="Your Comment",
                placeholder="Type your comment here...",
                lines=3,
                max_lines=5
            )
        with gr.Column(scale=1):
            submit_btn = gr.Button("Classify Comment", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")

    gr.Examples(
        examples=[
            "I love this community, it's so supportive!",
            "You are an idiot and should leave this platform.",
            "This app is amazing, great work!"
        ],
        inputs=comment_input,
        label="Try these examples:"
    )

    with gr.Row():
        with gr.Column(scale=2):
            prediction_output = gr.Textbox(label="Prediction", placeholder="Prediction will appear here...")
        with gr.Column(scale=1):
            confidence_output = gr.Slider(
                label="Confidence",
                minimum=0,
                maximum=1,
                value=0,
                interactive=False
            )

    with gr.Row():
        label_display = gr.HTML()
        threshold_display = gr.HTML()

    with gr.Accordion("Prediction History", open=False):
        history_output = gr.JSON(label="Previous Predictions")

    with gr.Accordion("Provide Feedback", open=False):
        feedback_input = gr.Radio(
            choices=["Yes, the prediction was correct", "No, the prediction was incorrect"],
            label="Was this prediction correct?"
        )
        feedback_comment = gr.Textbox(label="Additional Comments (optional)", placeholder="Let us know your thoughts...")
        feedback_submit = gr.Button("Submit Feedback")
        feedback_output = gr.Textbox(label="Feedback Status")

    def handle_classification(comment, history):
        # Classify the comment, append the result to the running history, and
        # build both HTML badges here, so each output component is written
        # exactly once per event instead of appearing twice in the outputs list.
        if history is None:
            history = []
        prediction, confidence, color = classify_toxic_comment(comment)
        history.append({"comment": comment, "prediction": prediction, "confidence": confidence})
        label_html = f"<span style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"
        threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
        threshold_color = "green" if confidence >= 0.7 else "orange"
        threshold_html = f"<span style='color: {threshold_color}; font-size: 16px;'>{threshold_message}</span>"
        return prediction, confidence, label_html, history, threshold_html

    def handle_feedback(feedback, comment):
        return f"Thank you for your feedback: {feedback}\nAdditional comment: {comment}"

    # Show a loading state first, then classify. The loading step deliberately
    # leaves history_output untouched so the history survives across clicks.
    submit_btn.click(
        fn=lambda: ("Classifying...", 0, "", ""),
        inputs=[],
        outputs=[prediction_output, confidence_output, label_display, threshold_display]
    ).then(
        fn=handle_classification,
        inputs=[comment_input, history_output],
        outputs=[prediction_output, confidence_output, label_display, history_output, threshold_display]
    )

    feedback_submit.click(
        fn=handle_feedback,
        inputs=[feedback_input, feedback_comment],
        outputs=feedback_output
    )

    clear_btn.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[comment_input, prediction_output, confidence_output, label_display, history_output, threshold_display]
    )

    gr.Markdown(
        """
        ---
        **About**: This app is part of a four-stage pipeline for automated toxic comment moderation with emotional intelligence via RLHF. Built with ❤️ using Hugging Face and Gradio.
        """
    )

demo.launch()
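
Note: app.py imports `classify_toxic_comment` from a companion `classifier` module that is not part of this commit. Below is a minimal sketch of the interface the app expects, assuming a Hugging Face `text-classification` pipeline around the fine-tuned XLM-RoBERTa model the app description mentions; the checkpoint ID and label names are placeholders, not the actual module.

# classifier.py — hypothetical sketch of the module app.py imports
from transformers import pipeline

# Placeholder checkpoint ID; substitute the actual fine-tuned model
_pipe = pipeline("text-classification", model="your-username/xlm-roberta-toxic-comment")

def classify_toxic_comment(comment):
    """Return (prediction, confidence, color) as app.py expects."""
    result = _pipe(comment)[0]  # e.g. {"label": "toxic", "score": 0.98}
    is_toxic = result["label"].lower() in ("toxic", "label_1")  # label names vary by checkpoint
    prediction = "Toxic" if is_toxic else "Non-Toxic"
    confidence = round(float(result["score"]), 4)
    color = "red" if is_toxic else "green"
    return prediction, confidence, color

With such a module alongside app.py, `python app.py` starts the Gradio server locally; on Hugging Face Spaces the app launches automatically.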