JanviMl committed · Commit 2550e3f · verified · 1 Parent(s): bf242e6

Update app.py

Files changed (1)
  1. app.py +7 -14
app.py CHANGED
@@ -7,7 +7,7 @@ def clear_inputs():
     Reset all UI input and output fields to their default values.
     Returns a tuple of empty or default values for all UI components.
     """
-    return "", 0, "", [], "", "", "", "", 0, "", "", "", "", "", ""
+    return "", 0, "", [], "", "", "", "", 0, "", "", "", ""
 
 custom_css = """
 /* General Styling */
@@ -185,7 +185,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
             semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
             empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")
-            rouge_scores_output = gr.Textbox(label="ROUGE Scores", placeholder="ROUGE scores will appear here...")
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -209,7 +208,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             prediction, confidence, color, toxicity_score, bias_score,
             paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
             paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
-            semantic_similarity, empathy_score, rouge_scores
+            semantic_similarity, empathy_score
         ) = classify_toxic_comment(comment)
 
         history.append({
@@ -224,8 +223,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             "paraphrased_toxicity_score": paraphrased_toxicity_score,
             "paraphrased_bias_score": paraphrased_bias_score,
             "semantic_similarity": semantic_similarity,
-            "empathy_score": empathy_score,
-            "rouge_scores": rouge_scores
+            "empathy_score": empathy_score
         })
 
         threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
@@ -245,10 +243,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
         )
         semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
         empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"
-        rouge_scores_display = (
-            f"ROUGE-1: {rouge_scores['rouge1']}, ROUGE-2: {rouge_scores['rouge2']}, ROUGE-L: {rouge_scores['rougeL']}"
-            if rouge_scores else "N/A"
-        )
 
         prediction_class = "toxic-indicator" if "Toxic" in prediction else "nontoxic-indicator"
         prediction_html = f"<span class='{prediction_class}' style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"
@@ -258,7 +252,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_display, bias_display,
             paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
             paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
-            semantic_similarity_display, empathy_score_display, rouge_scores_display
+            semantic_similarity_display, empathy_score_display
         )
 
         def handle_feedback(feedback, comment):
@@ -270,7 +264,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
             "Paraphrasing... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", 0,
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>", "",
-            "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>",
             "Calculating... <span class='loading'>⏳</span>", "Calculating... <span class='loading'>⏳</span>"
         ),
         inputs=[],
@@ -279,7 +272,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output
         ]
     ).then(
         fn=handle_classification,
@@ -289,7 +282,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output
         ]
     ).then(
         fn=lambda prediction, confidence, html: html,
@@ -314,7 +307,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             comment_input, confidence_output, label_display, history_output, toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output
        ]
    )
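
For context on the wiring this commit touches: Gradio matches a handler's returned tuple to its outputs list positionally, so removing rouge_scores_output from the UI also means removing the matching value from every return path that feeds those components. A minimal, self-contained sketch of that contract, using a hypothetical two-output handler rather than the app's real classify_toxic_comment pipeline:

    import gradio as gr

    def analyze(comment):
        # Hypothetical stand-in for handle_classification: the number of
        # returned values must equal len(outputs) on the listener below.
        similarity = 0.87  # placeholder score
        empathy = 0.42     # placeholder score
        return (
            f"{similarity} (Scale: 0 to 1, higher is better)",
            f"{empathy} (Scale: 0 to 1, higher indicates more empathy)",
        )

    with gr.Blocks() as demo:
        comment_input = gr.Textbox(label="Comment")
        semantic_similarity_output = gr.Textbox(label="Semantic Similarity")
        empathy_score_output = gr.Textbox(label="Empathy Score")
        btn = gr.Button("Classify")
        # One returned value per component; dropping a component (like the
        # old rouge_scores_output) means dropping its return value too.
        btn.click(fn=analyze, inputs=[comment_input],
                  outputs=[semantic_similarity_output, empathy_score_output])

    demo.launch()

Each returned string lands in the textbox at the same index of outputs, which is why this commit edits every return tuple alongside every outputs list.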
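The same positional rule covers clear_inputs, whose return tuple is spread across the clear button's outputs list. A throwaway check like the sketch below (the list of names simply mirrors the components in the final hunk; none of this code is in app.py) makes it easy to confirm the two stay in lockstep after edits like this one:

    def clear_inputs():
        # Copied from the new revision: 13 default values.
        return "", 0, "", [], "", "", "", "", 0, "", "", "", ""

    # Names mirroring the clear button's outputs list in the final hunk.
    clear_outputs = [
        "comment_input", "confidence_output", "label_display", "history_output",
        "toxicity_output", "bias_output", "paraphrased_comment_output",
        "paraphrased_prediction_output", "paraphrased_confidence_output",
        "paraphrased_toxicity_output", "paraphrased_bias_output",
        "paraphrased_label_display", "semantic_similarity_output",
        "empathy_score_output",
    ]
    # The two lengths should match, or Gradio errors at clear time.
    print(len(clear_inputs()), len(clear_outputs))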