bunyaminergen committed on
Commit c7defd6 · 1 Parent(s): 384a27c
Files changed (1)
  1. app.py +68 -4
app.py CHANGED
@@ -206,19 +206,83 @@ def process_audio(uploaded_audio):
         return {"error": str(e)}
 
 
+def transform_output_to_tables(final_output: dict):
+    """
+    Helper function to convert data into a table view.
+    Transforms data inside `final_output` into two separate tables.
+
+    Parameters
+    ----------
+    final_output : dict
+        Dictionary containing processed results.
+
+    Returns
+    -------
+    tuple
+        Returns two lists as `(ssm_data, file_data)`.
+    """
+
+    if "error" in final_output:
+        return [], []
+
+    # Utterance Table
+    ssm_data = []
+    if "ssm" in final_output:
+        for item in final_output["ssm"]:
+            ssm_data.append([
+                item.get("speaker", ""),
+                item.get("start_time", ""),
+                item.get("end_time", ""),
+                item.get("text", ""),
+                item.get("index", ""),
+                item.get("sentiment", ""),
+                item.get("profane", "")
+            ])
+
+    # File Table
+    file_data = []
+    for key in ["summary", "conflict", "topic", "silence"]:
+        file_data.append([key, final_output.get(key, "")])
+
+    return ssm_data, file_data
+
+
 with gr.Blocks() as demo:
-    gr.Markdown("## Diarization, Transcription & Analysis")
+    gr.Markdown("Callytics Demo")
 
     with gr.Row():
         audio_input = gr.Audio(type="filepath", label="Upload your audio")
-        output_display = gr.JSON(label="Final Output")
 
     submit_btn = gr.Button("Process")
 
+    with gr.Row():
+        utterance_table = gr.Dataframe(
+            headers=["Speaker", "Start Time", "End Time", "Text", "Index", "Sentiment", "Profane"],
+            label="Utterance Table"
+        )
+
+    with gr.Row():
+        file_table = gr.Dataframe(
+            headers=["Key", "Value"],
+            label="File Table"
+        )
+
+    output_display = gr.JSON(label="Final Output (JSON)")
+
+
+    def process_and_show_tables(uploaded_audio):
+        """
+        Calls the main processing function `process_audio` and returns data suitable for the table.
+        """
+        final_output = process_audio(uploaded_audio)
+        ssm_data, file_data = transform_output_to_tables(final_output)
+        return ssm_data, file_data, final_output
+
+
     submit_btn.click(
-        fn=process_audio,
+        fn=process_and_show_tables,
         inputs=audio_input,
-        outputs=output_display
+        outputs=[utterance_table, file_table, output_display]
     )
 
 if __name__ == "__main__":
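
For reference, a minimal usage sketch of the new helper. The key names come from the diff above; the sample values, and the assumption that app.py can be imported without heavy side effects, are illustrative and not part of the commit.

# Minimal sketch (assumes app.py is importable and that process_audio's output dict
# uses the keys referenced in transform_output_to_tables; the sample values are invented).
from app import transform_output_to_tables

sample_output = {
    "ssm": [
        {"speaker": "SPEAKER_00", "start_time": 0.0, "end_time": 2.4,
         "text": "Hello, thank you for calling.", "index": 0,
         "sentiment": "Neutral", "profane": False},
    ],
    "summary": "Short greeting call.",
    "conflict": False,
    "topic": "Greeting",
    "silence": 0.0,
}

ssm_rows, file_rows = transform_output_to_tables(sample_output)
# ssm_rows: one row per utterance, in the column order of the "Utterance Table" headers
# file_rows: [["summary", ...], ["conflict", ...], ["topic", ...], ["silence", ...]]
print(ssm_rows)
print(file_rows)

Because process_and_show_tables returns (ssm_data, file_data, final_output), Gradio fills utterance_table, file_table, and output_display in that order, matching the outputs list passed to submit_btn.click.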