Update app.py

app.py CHANGED
@@ -7,7 +7,7 @@ def clear_inputs():
     Reset all UI input and output fields to their default values.
     Returns a tuple of empty or default values for all UI components.
     """
-    return "", 0, "", [], "", "", "", "", 0, "", "", "", "", "", ""
+    return "", 0, "", [], "", "", "", "", 0, "", "", "", "", "", ""
 
 custom_css = """
 /* General Styling */
@@ -184,11 +184,9 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             paraphrased_toxicity_output = gr.Textbox(label="Paraphrased Toxicity Score", placeholder="Toxicity score will appear here...")
             paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
             semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
-            emotion_shift_output = gr.Textbox(label="Emotion Shift", placeholder="Emotion shift will appear here...")
             empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")
             bleu_score_output = gr.Textbox(label="BLEU Score", placeholder="BLEU score will appear here...")
             rouge_scores_output = gr.Textbox(label="ROUGE Scores", placeholder="ROUGE scores will appear here...")
-            entailment_score_output = gr.Textbox(label="Entailment Score (Factual Consistency)", placeholder="Entailment score will appear here...")
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -212,8 +210,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             prediction, confidence, color, toxicity_score, bias_score,
             paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
             paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
-            semantic_similarity,
-            bleu_score, rouge_scores, entailment_score
+            semantic_similarity, empathy_score, bleu_score, rouge_scores
         ) = classify_toxic_comment(comment)
 
         history.append({
@@ -228,11 +225,9 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             "paraphrased_toxicity_score": paraphrased_toxicity_score,
             "paraphrased_bias_score": paraphrased_bias_score,
             "semantic_similarity": semantic_similarity,
-            "emotion_shift": emotion_shift,
             "empathy_score": empathy_score,
             "bleu_score": bleu_score,
-            "rouge_scores": rouge_scores
-            "entailment_score": entailment_score
+            "rouge_scores": rouge_scores
         })
 
         threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
@@ -251,14 +246,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             if paraphrased_prediction else ""
         )
         semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
-        emotion_shift_display = emotion_shift if emotion_shift else "N/A"
         empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"
         bleu_score_display = f"{bleu_score} (Scale: 0 to 1, higher is better)" if bleu_score is not None else "N/A"
         rouge_scores_display = (
             f"ROUGE-1: {rouge_scores['rouge1']}, ROUGE-2: {rouge_scores['rouge2']}, ROUGE-L: {rouge_scores['rougeL']}"
             if rouge_scores else "N/A"
         )
-        entailment_score_display = f"{entailment_score} (Scale: 0 to 1, higher indicates better consistency)" if entailment_score is not None else "N/A"
 
         prediction_class = "toxic-indicator" if "Toxic" in prediction else "nontoxic-indicator"
         prediction_html = f"<span class='{prediction_class}' style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"
@@ -268,8 +261,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_display, bias_display,
             paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
             paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
-            semantic_similarity_display,
-            bleu_score_display, rouge_scores_display, entailment_score_display
+            semantic_similarity_display, empathy_score_display, bleu_score_display, rouge_scores_display
         )
 
     def handle_feedback(feedback, comment):
@@ -282,7 +274,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             "Paraphrasing... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", 0,
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", "",
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
-            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>"
         ),
         inputs=[],
@@ -291,8 +282,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output,
-            bleu_score_output, rouge_scores_output, entailment_score_output
+            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
         ]
     ).then(
         fn=handle_classification,
@@ -302,8 +292,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output,
-            bleu_score_output, rouge_scores_output, entailment_score_output
+            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
         ]
     ).then(
         fn=lambda prediction, confidence, html: html,
@@ -328,8 +317,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             comment_input, confidence_output, label_display, history_output, toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output,
-            bleu_score_output, rouge_scores_output, entailment_score_output
+            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
         ]
     )
 