JanviMl committed
Commit 2ec7c09 · verified · 1 Parent(s): a0e9408

Create app.py

Files changed (1): app.py +349 -0
app.py ADDED
# app.py
import gradio as gr
from classifier import classify_toxic_comment

# Clear function for resetting the UI
def clear_inputs():
    # One reset value per component wired to clear_btn below (20 in total,
    # including prediction_output and threshold_display)
    return ("", "", 0, "", [], "", "", "", "", "", 0,
            "", "", "", "", "", "", "", "", "")

# Custom CSS for styling
custom_css = """
/* General Styling */
body {
    font-family: 'Roboto', sans-serif;
    background-color: #F5F7FA;
    color: #333333;
}

/* Header Styling */
h1 {
    color: #FFFFFF !important;
    background-color: #1E88E5;
    padding: 20px;
    border-radius: 10px;
    text-align: center;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
    margin-bottom: 20px;
}

/* Section Headers */
h3 {
    color: #1E88E5;
    font-weight: 600;
    margin-bottom: 15px;
    border-bottom: 2px solid #1E88E5;
    padding-bottom: 5px;
}

/* Input Textbox */
.gr-textbox textarea {
    border: 2px solid #1E88E5 !important;
    border-radius: 10px !important;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
    transition: border-color 0.3s, box-shadow 0.3s;
}
.gr-textbox textarea:focus {
    border-color: #1565C0 !important;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15) !important;
}

/* Buttons */
.gr-button-primary {
    background-color: #1E88E5 !important;
    color: white !important;
    border-radius: 10px !important;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
    transition: background-color 0.3s, transform 0.1s;
    font-weight: 500;
}
.gr-button-primary:hover {
    background-color: #1565C0 !important;
    transform: translateY(-2px);
}
.gr-button-secondary {
    background-color: #D32F2F !important;
    color: white !important;
    border-radius: 10px !important;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
    transition: background-color 0.3s, transform 0.1s;
    font-weight: 500;
}
.gr-button-secondary:hover {
    background-color: #B71C1C !important;
    transform: translateY(-2px);
}

/* Sliders */
.gr-slider {
    background-color: #E0E0E0 !important;
    border-radius: 10px !important;
    box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1);
}

/* Output Boxes */
.gr-textbox {
    border: 1px solid #E0E0E0 !important;
    border-radius: 10px !important;
    background-color: #FFFFFF !important;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
    padding: 10px;
    margin-bottom: 10px;
}

/* Accordion */
.gr-accordion {
    border: 1px solid #E0E0E0 !important;
    border-radius: 10px !important;
    background-color: #FFFFFF !important;
    margin-bottom: 15px;
}

/* Custom Classes for Visual Indicators */
.toxic-indicator::before {
    content: "⚠️ ";
    color: #D32F2F;
    font-size: 20px;
}
.nontoxic-indicator::before {
    content: "✅ ";
    color: #388E3C;
    font-size: 20px;
}

/* Loading State Animation */
@keyframes pulse {
    0% { opacity: 1; }
    50% { opacity: 0.5; }
    100% { opacity: 1; }
}
.loading {
    animation: pulse 1.5s infinite;
}
"""

# Main UI: layout and event wiring
with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
    # Header Section
    gr.Markdown(
        """
        # Toxic Comment Classifier
        Enter a comment below to check if it's toxic or non-toxic. This app uses a fine-tuned XLM-RoBERTa model to classify comments, paraphrases toxic comments, and evaluates the output with advanced metrics.
        """
    )

    # Input Section
    with gr.Row():
        with gr.Column(scale=4, min_width=600):
            comment_input = gr.Textbox(
                label="Your Comment",
                placeholder="Type your comment here...",
                lines=3,
                max_lines=5
            )
        with gr.Column(scale=1, min_width=200):
            submit_btn = gr.Button("Classify Comment", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")

    gr.Examples(
        examples=[
            "I love this community, it's so supportive!",
            "You are an idiot and should leave this platform.",
            "This app is amazing, great work!"
        ],
        inputs=comment_input,
        label="Try these examples:"
    )

    # Output Section
    with gr.Row():
        # Left Column: Original Comment Metrics
        with gr.Column(scale=1, min_width=400):
            gr.Markdown("### Original Comment Analysis")
            prediction_output = gr.Textbox(label="Prediction", placeholder="Prediction will appear here...")
            label_display = gr.HTML()
            confidence_output = gr.Slider(
                label="Confidence",
                minimum=0,
                maximum=1,
                value=0,
                interactive=False
            )
            toxicity_output = gr.Textbox(label="Toxicity Score", placeholder="Toxicity score will appear here...")
            bias_output = gr.Textbox(label="Bias Score", placeholder="Bias score will appear here...")
            threshold_display = gr.HTML()

        # Right Column: Paraphrased Output (if Toxic)
        with gr.Column(scale=1, min_width=400):
            with gr.Accordion("Paraphrased Output (if Toxic)", open=False):
                paraphrased_comment_output = gr.Textbox(label="Paraphrased Comment", placeholder="Paraphrased comment will appear here if the input is toxic...")
                paraphrased_prediction_output = gr.Textbox(label="Paraphrased Prediction", placeholder="Prediction will appear here...")
                paraphrased_label_display = gr.HTML()
                paraphrased_confidence_output = gr.Slider(
                    label="Paraphrased Confidence",
                    minimum=0,
                    maximum=1,
                    value=0,
                    interactive=False
                )
                paraphrased_toxicity_output = gr.Textbox(label="Paraphrased Toxicity Score", placeholder="Toxicity score will appear here...")
                paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
                semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
                emotion_shift_output = gr.Textbox(label="Emotion Shift", placeholder="Emotion shift will appear here...")
                empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")
                bleu_score_output = gr.Textbox(label="BLEU Score", placeholder="BLEU score will appear here...")
                rouge_scores_output = gr.Textbox(label="ROUGE Scores", placeholder="ROUGE scores will appear here...")
                entailment_score_output = gr.Textbox(label="Entailment Score (Factual Consistency)", placeholder="Entailment score will appear here...")

    # History and Feedback Sections
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Accordion("Prediction History", open=False):
                history_output = gr.JSON(label="Previous Predictions")

        with gr.Column(scale=1):
            with gr.Accordion("Provide Feedback", open=False):
                feedback_input = gr.Radio(
                    choices=["Yes, the prediction was correct", "No, the prediction was incorrect"],
                    label="Was this prediction correct?"
                )
                feedback_comment = gr.Textbox(label="Additional Comments (optional)", placeholder="Let us know your thoughts...")
                feedback_submit = gr.Button("Submit Feedback")
                feedback_output = gr.Textbox(label="Feedback Status")

    def handle_classification(comment, history):
        if history is None:
            history = []
        (
            prediction, confidence, color, toxicity_score, bias_score,
            paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
            paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
            semantic_similarity, emotion_shift, empathy_score,
            bleu_score, rouge_scores, entailment_score
        ) = classify_toxic_comment(comment)

        history.append({
            "comment": comment,
            "prediction": prediction,
            "confidence": confidence,
            "toxicity_score": toxicity_score,
            "bias_score": bias_score,
            "paraphrased_comment": paraphrased_comment,
            "paraphrased_prediction": paraphrased_prediction,
            "paraphrased_confidence": paraphrased_confidence,
            "paraphrased_toxicity_score": paraphrased_toxicity_score,
            "paraphrased_bias_score": paraphrased_bias_score,
            "semantic_similarity": semantic_similarity,
            "emotion_shift": emotion_shift,
            "empathy_score": empathy_score,
            "bleu_score": bleu_score,
            "rouge_scores": rouge_scores,
            "entailment_score": entailment_score
        })

        # Render the confidence-threshold badge once here, so the event chain
        # below writes threshold_display a single time (writing one component
        # twice from the same event, as message then color, is unreliable).
        threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
        threshold_color = "green" if confidence >= 0.7 else "orange"
        threshold_html = f"<span style='color: {threshold_color}; font-size: 16px;'>{threshold_message}</span>"
        toxicity_display = f"{toxicity_score} (Scale: 0 to 1, lower is less toxic)" if toxicity_score is not None else "N/A"
        bias_display = f"{bias_score} (Scale: 0 to 1, lower indicates less bias)" if bias_score is not None else "N/A"

        paraphrased_comment_display = paraphrased_comment if paraphrased_comment else "N/A (Comment was non-toxic)"
        paraphrased_prediction_display = paraphrased_prediction if paraphrased_prediction else "N/A"
        paraphrased_confidence_display = paraphrased_confidence if paraphrased_confidence else 0
        paraphrased_toxicity_display = f"{paraphrased_toxicity_score} (Scale: 0 to 1, lower is less toxic)" if paraphrased_toxicity_score is not None else "N/A"
        paraphrased_bias_display = f"{paraphrased_bias_score} (Scale: 0 to 1, lower indicates less bias)" if paraphrased_bias_score is not None else "N/A"
        # Note: "Non-Toxic" contains the substring "Toxic", so a prefix check is
        # used instead of `"Toxic" in prediction` to pick the indicator class.
        paraphrased_label_html = (
            f"<span class='{'toxic-indicator' if paraphrased_prediction.lower().startswith('toxic') else 'nontoxic-indicator'}' "
            f"style='color: {paraphrased_color}; font-size: 20px; font-weight: bold;'>{paraphrased_prediction}</span>"
            if paraphrased_prediction else ""
        )
        semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
        emotion_shift_display = emotion_shift if emotion_shift else "N/A"
        empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"
        bleu_score_display = f"{bleu_score} (Scale: 0 to 1, higher is better)" if bleu_score is not None else "N/A"
        rouge_scores_display = (
            f"ROUGE-1: {rouge_scores['rouge1']}, ROUGE-2: {rouge_scores['rouge2']}, ROUGE-L: {rouge_scores['rougeL']}"
            if rouge_scores else "N/A"
        )
        entailment_score_display = f"{entailment_score} (Scale: 0 to 1, higher indicates better consistency)" if entailment_score is not None else "N/A"

        # Add a visual indicator to the prediction (prefix check, see note above)
        prediction_class = "toxic-indicator" if prediction.lower().startswith("toxic") else "nontoxic-indicator"
        prediction_html = f"<span class='{prediction_class}' style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"

        return (
            prediction, confidence, prediction_html, history, threshold_html,
            toxicity_display, bias_display,
            paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
            paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
            semantic_similarity_display, emotion_shift_display, empathy_score_display,
            bleu_score_display, rouge_scores_display, entailment_score_display
        )

    def handle_feedback(feedback, comment):
        return f"Thank you for your feedback: {feedback}\nAdditional comment: {comment}"

    # Output components written by the classification event, in the same order
    # as the tuple returned by handle_classification. threshold_display appears
    # exactly once (the handler returns finished HTML for it, so no extra
    # formatting steps need to be chained afterwards). history_output is
    # excluded from the loading step so the stored history is not wiped before
    # handle_classification reads it.
    classification_outputs = [
        prediction_output, confidence_output, label_display, history_output,
        threshold_display, toxicity_output, bias_output,
        paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
        paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
        semantic_similarity_output, emotion_shift_output, empathy_score_output,
        bleu_score_output, rouge_scores_output, entailment_score_output
    ]
    loading_outputs = [c for c in classification_outputs if c is not history_output]

    submit_btn.click(
        fn=lambda: (
            "Classifying... <span class='loading'>⏳</span>", 0, "", "",
            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
            "Paraphrasing... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", 0,
            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", "",
            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>"
        ),  # Show loading state with animation (18 values, history excluded)
        inputs=[],
        outputs=loading_outputs
    ).then(
        fn=handle_classification,
        inputs=[comment_input, history_output],
        outputs=classification_outputs
    )

    feedback_submit.click(
        fn=handle_feedback,
        inputs=[feedback_input, feedback_comment],
        outputs=feedback_output
    )

    clear_btn.click(
        fn=clear_inputs,
        inputs=[],
        # 20 components, matching the 20 values returned by clear_inputs
        # (prediction_output and threshold_display are now cleared as well).
        outputs=[
            comment_input, prediction_output, confidence_output, label_display, history_output,
            threshold_display, toxicity_output, bias_output,
            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
            semantic_similarity_output, emotion_shift_output, empathy_score_output,
            bleu_score_output, rouge_scores_output, entailment_score_output
        ]
    )

    gr.Markdown(
        """
        ---
        **About**: This app is part of a four-stage pipeline for automated toxic comment moderation with emotional intelligence via RLHF. Built with ❤️ using Hugging Face and Gradio.
        """
    )

demo.launch()
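
For running this Space locally without the real models, a minimal classifier.py stub can stand in for the imported module. This is a hypothetical sketch, not the project's actual classifier: it only assumes that classify_toxic_comment returns the 17-tuple unpacked in handle_classification above, with the paraphrase and metric fields set to None for non-toxic inputs, which is how app.py consumes it.

# classifier.py -- hypothetical local-testing stub, not the real pipeline.
def classify_toxic_comment(comment):
    """Return the 17-tuple that app.py unpacks, using placeholder values.
    The real Space classifies with a fine-tuned XLM-RoBERTa model."""
    if "idiot" not in comment.lower():  # crude placeholder heuristic
        # Non-toxic path: paraphrase/metric fields are None, which app.py
        # renders as "N/A (Comment was non-toxic)" and "N/A".
        return ("Non-Toxic", 0.95, "green", 0.05, 0.02,
                None, None, None, None, None, None,
                None, None, None, None, None, None)
    return ("Toxic", 0.90, "red", 0.85, 0.40,                      # prediction, confidence, color, toxicity, bias
            "Please reconsider how you address others.",           # paraphrased_comment
            "Non-Toxic", 0.88, "green", 0.10, 0.05,                # paraphrased prediction block
            0.75, "anger -> neutral", 0.60,                        # similarity, emotion shift, empathy
            0.35, {"rouge1": 0.5, "rouge2": 0.3, "rougeL": 0.45},  # BLEU, ROUGE
            0.80)                                                  # entailment

With such a stub in place, `python app.py` starts the Gradio server (by default at http://127.0.0.1:7860), which is a quick way to verify that the component wiring above matches the handler's return values.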