JanviMl committed
Commit e5f485f · verified · 1 Parent(s): efe1e41

Update app.py

Files changed (1)
    app.py  +5 -8
app.py CHANGED
@@ -185,7 +185,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
             semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
             empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")
-            bleu_score_output = gr.Textbox(label="BLEU Score", placeholder="BLEU score will appear here...")
             rouge_scores_output = gr.Textbox(label="ROUGE Scores", placeholder="ROUGE scores will appear here...")
 
         with gr.Row():
@@ -210,7 +209,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             prediction, confidence, color, toxicity_score, bias_score,
             paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
             paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
-            semantic_similarity, empathy_score, bleu_score, rouge_scores
+            semantic_similarity, empathy_score, rouge_scores
         ) = classify_toxic_comment(comment)
 
         history.append({
@@ -226,7 +225,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             "paraphrased_bias_score": paraphrased_bias_score,
             "semantic_similarity": semantic_similarity,
             "empathy_score": empathy_score,
-            "bleu_score": bleu_score,
             "rouge_scores": rouge_scores
         })
 
@@ -247,7 +245,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
         )
         semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
         empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"
-        bleu_score_display = f"{bleu_score} (Scale: 0 to 1, higher is better)" if bleu_score is not None else "N/A"
         rouge_scores_display = (
             f"ROUGE-1: {rouge_scores['rouge1']}, ROUGE-2: {rouge_scores['rouge2']}, ROUGE-L: {rouge_scores['rougeL']}"
             if rouge_scores else "N/A"
@@ -261,7 +258,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_display, bias_display,
             paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
             paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
-            semantic_similarity_display, empathy_score_display, bleu_score_display, rouge_scores_display
+            semantic_similarity_display, empathy_score_display, rouge_scores_display
         )
 
     def handle_feedback(feedback, comment):
@@ -282,7 +279,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output, rouge_scores_output
         ]
     ).then(
         fn=handle_classification,
@@ -292,7 +289,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output, rouge_scores_output
         ]
     ).then(
         fn=lambda prediction, confidence, html: html,
@@ -317,7 +314,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
             comment_input, confidence_output, label_display, history_output, toxicity_output, bias_output,
             paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
             paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
-            semantic_similarity_output, empathy_score_output, bleu_score_output, rouge_scores_output
+            semantic_similarity_output, empathy_score_output, rouge_scores_output
         ]
     )