File size: 2,156 Bytes
e011c72
 
 
304edde
 
e011c72
 
 
304edde
 
e011c72
 
304edde
e011c72
 
 
 
 
 
304edde
e011c72
 
 
 
304edde
e011c72
 
304edde
 
 
 
 
 
 
 
e011c72
 
 
 
 
 
 
 
 
 
304edde
 
 
 
 
 
 
 
 
 
e011c72
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import gradio as gr
import time

# Sample audio file(s) shown in the clickable examples Dataset below;
# load_example() indexes into this list. Path is relative to the working dir.
EXAMPLES = ['test.wav']

def speech_to_text(speech):
    """Mock ASR backend for the demo.

    Ignores the *speech* input, sleeps briefly to mimic inference latency,
    and returns canned results:

    Returns:
        tuple: (segments, transcript) where *segments* is a list of
        (text, speaker-label-or-None) pairs for the HighlightedText
        component, and *transcript* is the full lower-cased transcript
        string for the Textbox.
    """
    time.sleep(1)  # simulate model inference time
    diarized_segments = [
        ("So I've prepared a presentation I'm sharing it with All you should be ", "Speaker 0"),
        ("able to seat on your screen right now. Got it?", "Speaker 0"),
        ("from 0.258-6.249", None),
        ("I don't see a link anywhere says it Headed down low a plug and.", "Speaker 1"),
        ("from 6.384-9.573", None),
    ]
    full_transcript = (
        "so i've prepared a presentation \n i'm sharing it with all you should be "
        "able to seat on your screen right now got it i don't see a link anywhere "
        "says it headed down low a plug and"
    )
    return diarized_segments, full_transcript

def sentiment(checked_options):
    """Mock sentiment scorer.

    Ignores the selected *checked_options*, sleeps briefly to mimic
    analysis latency, and returns a fixed label -> confidence mapping
    in the shape gr.Label expects.
    """
    time.sleep(0.3)  # simulate analysis latency
    scores = {"happy": 0.5, "confused": 0.3, "sad": 0.2}
    return scores

# Build the demo UI: audio in -> diarized + full transcript out, plus a
# speaker-selection-driven sentiment label.
demo = gr.Blocks()
# NOTE(review): `encrypt` is a legacy gradio 3.x attribute (removed in later
# releases) — confirm the pinned gradio version before upgrading.
demo.encrypt = False

with demo:
    with gr.Row():
        # Left column: audio input, transcribe button, clickable examples.
        with gr.Column():
            audio = gr.Audio(label="Audio file")
            with gr.Row():
                btn = gr.Button("Transcribe")
                
            with gr.Row():
                # type="index" makes the Dataset pass the clicked row's index,
                # which load_example() below maps back into EXAMPLES.
                examples = gr.components.Dataset(
                    components=[audio],
                    samples=[EXAMPLES],
                    type="index",
                )
                
        # Right column: diarized text, full transcript, speaker checkboxes,
        # and the sentiment label output.
        with gr.Column():
            gr.Markdown("**Diarized Output:**")
            # NOTE(review): `lines=` is not a documented HighlightedText kwarg
            # in current gradio — verify it is accepted by the pinned version.
            diarized = gr.HighlightedText(lines=5, label="Diarized Output")
            full = gr.Textbox(lines=4, label="Full Transcript")
            check = gr.CheckboxGroup(["Speaker 1", "Speaker 2"], label="Choose speaker(s) for sentiment analysis")
            label = gr.Label()
        
        # NOTE(review): `status_tracker` is a legacy gradio event argument
        # (dropped in later releases); it overlays a loading indicator on the
        # output container while the handler runs.
        btn.click(speech_to_text, audio, [diarized, full], status_tracker=gr.StatusTracker(cover_container=True))
        check.change(sentiment, check, label, status_tracker=gr.StatusTracker(cover_container=True))
        
        def load_example(example_id):
            # Dataset(type="index") delivers the clicked row's index; map it
            # back to the sample value and preprocess it for the Audio input.
            processed_examples = audio.preprocess_example(EXAMPLES[example_id])
            print(processed_examples)
            return processed_examples
        
        # Private gradio hook: wire example clicks straight into the audio
        # component without output postprocessing.
        examples._click_no_postprocess(
            load_example,
            inputs=[examples],
            outputs=[audio])
        
    # NOTE(review): launch() is conventionally called after the `with demo:`
    # block exits; confirm calling it inside the context works on the pinned
    # gradio version.
    demo.launch()