JanviMl committed
Commit 6f0e9db · verified · 1 Parent(s): c91906e

Update app.py

Files changed (1): app.py (+74 -8)
app.py CHANGED
@@ -4,7 +4,7 @@ from classifier import classify_toxic_comment
 
 # Clear function for resetting the UI
 def clear_inputs():
-    return "", 0, "", [], "", ""
+    return "", 0, "", [], "", "", "", "", 0, "", "", "", "", ""
 
 # Custom CSS for styling
 custom_css = """
@@ -75,6 +75,23 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
     label_display = gr.HTML()
     threshold_display = gr.HTML()
 
+    with gr.Accordion("Paraphrased Output (if Toxic)", open=False):
+        paraphrased_comment_output = gr.Textbox(label="Paraphrased Comment", placeholder="Paraphrased comment will appear here if the input is toxic...")
+        paraphrased_prediction_output = gr.Textbox(label="Paraphrased Prediction", placeholder="Prediction will appear here...")
+        paraphrased_toxicity_output = gr.Textbox(label="Paraphrased Toxicity Score", placeholder="Toxicity score will appear here...")
+        paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
+        paraphrased_confidence_output = gr.Slider(
+            label="Paraphrased Confidence",
+            minimum=0,
+            maximum=1,
+            value=0,
+            interactive=False
+        )
+        paraphrased_label_display = gr.HTML()
+        semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
+        emotion_shift_output = gr.Textbox(label="Emotion Shift", placeholder="Emotion shift will appear here...")
+        empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")
+
     with gr.Accordion("Prediction History", open=False):
         history_output = gr.JSON(label="Previous Predictions")
 
@@ -90,31 +107,75 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
     def handle_classification(comment, history):
         if history is None:
             history = []
-        prediction, confidence, color, toxicity_score, bias_score = classify_toxic_comment(comment)
+        (
+            prediction, confidence, color, toxicity_score, bias_score,
+            paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
+            paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
+            semantic_similarity, emotion_shift, empathy_score
+        ) = classify_toxic_comment(comment)
+
         history.append({
             "comment": comment,
             "prediction": prediction,
             "confidence": confidence,
             "toxicity_score": toxicity_score,
-            "bias_score": bias_score
+            "bias_score": bias_score,
+            "paraphrased_comment": paraphrased_comment,
+            "paraphrased_prediction": paraphrased_prediction,
+            "paraphrased_confidence": paraphrased_confidence,
+            "paraphrased_toxicity_score": paraphrased_toxicity_score,
+            "paraphrased_bias_score": paraphrased_bias_score,
+            "semantic_similarity": semantic_similarity,
+            "emotion_shift": emotion_shift,
+            "empathy_score": empathy_score
         })
+
         threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
         threshold_color = "green" if confidence >= 0.7 else "orange"
         toxicity_display = f"{toxicity_score} (Scale: 0 to 1, lower is less toxic)" if toxicity_score is not None else "N/A"
         bias_display = f"{bias_score} (Scale: 0 to 1, lower indicates less bias)" if bias_score is not None else "N/A"
-        return prediction, confidence, color, history, threshold_message, threshold_color, toxicity_display, bias_display
+
+        paraphrased_comment_display = paraphrased_comment if paraphrased_comment else "N/A (Comment was non-toxic)"
+        paraphrased_prediction_display = paraphrased_prediction if paraphrased_prediction else "N/A"
+        paraphrased_confidence_display = paraphrased_confidence if paraphrased_confidence else 0
+        paraphrased_toxicity_display = f"{paraphrased_toxicity_score} (Scale: 0 to 1, lower is less toxic)" if paraphrased_toxicity_score is not None else "N/A"
+        paraphrased_bias_display = f"{paraphrased_bias_score} (Scale: 0 to 1, lower indicates less bias)" if paraphrased_bias_score is not None else "N/A"
+        paraphrased_label_html = f"<span style='color: {paraphrased_color}; font-size: 20px; font-weight: bold;'>{paraphrased_prediction}</span>" if paraphrased_prediction else ""
+        semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
+        emotion_shift_display = emotion_shift if emotion_shift else "N/A"
+        empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"
+
+        return (
+            prediction, confidence, color, history, threshold_message, threshold_color,
+            toxicity_display, bias_display,
+            paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
+            paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
+            semantic_similarity_display, emotion_shift_display, empathy_score_display
+        )
 
     def handle_feedback(feedback, comment):
         return f"Thank you for your feedback: {feedback}\nAdditional comment: {comment}"
 
     submit_btn.click(
-        fn=lambda: ("Classifying...", 0, "", None, "", "", "Calculating...", "Calculating..."), # Show loading state
+        fn=lambda: ("Classifying...", 0, "", None, "", "", "Calculating...", "Calculating...", "Paraphrasing...", "Calculating...", 0, "Calculating...", "Calculating...", "", "Calculating...", "Calculating...", "Calculating..."), # Show loading state
         inputs=[],
-        outputs=[prediction_output, confidence_output, label_display, history_output, threshold_display, threshold_display, toxicity_output, bias_output]
+        outputs=[
+            prediction_output, confidence_output, label_display, history_output, threshold_display, threshold_display,
+            toxicity_output, bias_output,
+            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
+            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
+            semantic_similarity_output, emotion_shift_output, empathy_score_output
+        ]
     ).then(
         fn=handle_classification,
         inputs=[comment_input, history_output],
-        outputs=[prediction_output, confidence_output, label_display, history_output, threshold_display, threshold_display, toxicity_output, bias_output]
+        outputs=[
+            prediction_output, confidence_output, label_display, history_output, threshold_display, threshold_display,
+            toxicity_output, bias_output,
+            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
+            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
+            semantic_similarity_output, emotion_shift_output, empathy_score_output
+        ]
     ).then(
         fn=lambda prediction, confidence, color: f"<span style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>",
        inputs=[prediction_output, confidence_output, label_display],
@@ -134,7 +195,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
     clear_btn.click(
         fn=clear_inputs,
         inputs=[],
-        outputs=[comment_input, confidence_output, label_display, history_output, toxicity_output, bias_output]
+        outputs=[
+            comment_input, confidence_output, label_display, history_output, toxicity_output, bias_output,
+            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
+            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
+            semantic_similarity_output, emotion_shift_output, empathy_score_output
+        ]
     )
 
     gr.Markdown(
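
Note: the tuple unpacking in handle_classification implies that classifier.classify_toxic_comment now returns 14 values in the order shown in the diff. The stub below only illustrates that assumed contract; the placeholder values and the None/0 fallbacks for non-toxic comments are assumptions, not the Space's real implementation.

# Hypothetical stub of classifier.classify_toxic_comment, matching the 14-value
# unpacking used by handle_classification. All values are placeholders.
def classify_toxic_comment(comment: str):
    prediction = "Non-Toxic"          # or "Toxic"
    confidence = 0.95                 # 0 to 1
    color = "green"                   # colour used in the HTML label badge
    toxicity_score = 0.05             # 0 to 1, lower is less toxic
    bias_score = 0.02                 # 0 to 1, lower indicates less bias
    # Assumption: paraphrase-related fields are only populated for toxic input;
    # otherwise they stay None/empty so the UI can fall back to "N/A".
    paraphrased_comment = None
    paraphrased_prediction = None
    paraphrased_confidence = 0
    paraphrased_color = ""
    paraphrased_toxicity_score = None
    paraphrased_bias_score = None
    semantic_similarity = None        # 0 to 1, higher is better
    emotion_shift = None              # e.g. "anger -> neutral"
    empathy_score = None              # 0 to 1, higher indicates more empathy
    return (
        prediction, confidence, color, toxicity_score, bias_score,
        paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
        paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
        semantic_similarity, emotion_shift, empathy_score
    )

Any return shape other than this 14-tuple would raise a ValueError at the unpacking step in handle_classification.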
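
The submit_btn wiring relies on Gradio event chaining: Button.click() returns an event reference whose .then() schedules a follow-up step, which is how the loading placeholders above are shown before handle_classification overwrites them. A minimal, self-contained sketch of that pattern (component names and the stand-in classifier are illustrative, not taken from app.py):

import gradio as gr

def slow_classification(text):
    # Stand-in for the real classifier call; returns the final label.
    return "Non-Toxic" if "kind" in text.lower() else "Toxic"

with gr.Blocks() as demo:
    comment = gr.Textbox(label="Comment")
    result = gr.Textbox(label="Prediction")
    submit = gr.Button("Submit")

    # Step 1: immediately show a placeholder, then
    # Step 2: replace it with the real result once classification finishes.
    submit.click(
        fn=lambda: "Classifying...",
        inputs=[],
        outputs=[result],
    ).then(
        fn=slow_classification,
        inputs=[comment],
        outputs=[result],
    )

demo.launch()

In each step Gradio expects fn to return exactly one value per component listed in outputs, which is why both the loading lambda and handle_classification in this commit return 17 values for the 17 output slots.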