File size: 1,386 Bytes
6e85301
 
 
 
 
 
 
 
090f39a
 
7fed9b9
375287a
7fed9b9
6e85301
 
 
7fed9b9
 
 
 
 
 
6e85301
 
c64851f
6e85301
090f39a
6e85301
 
 
7fed9b9
 
6e85301
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering
import gradio as gr
# ONNX export of the SQuAD2-finetuned RoBERTa QA model, run via ONNX Runtime
# for faster CPU inference than the plain PyTorch checkpoint.
model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2")
# Tokenizer comes from the original (non-ONNX) checkpoint the export was made from.
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

# Extractive question-answering pipeline backed by the ONNX model above.
onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)

def get_answer(context, question):
    """Run extractive QA over *context* and return the predicted answer text.

    Parameters
    ----------
    context : str
        The document to search for an answer span.
    question : str
        The question to answer.

    Returns
    -------
    str
        The extracted answer text, suitable for display in the output textbox.
    """
    # Pass explicit keywords: the QA pipeline's documented call signature is
    # (question=..., context=...) — bare positional strings are ambiguous.
    pred = onnx_qa(question=question, context=context)
    # The pipeline returns {"score", "start", "end", "answer"}; only the
    # answer string is meaningful for the 'Answer' Textbox, so don't show
    # the raw dict repr to the user.
    return pred["answer"]


# Sample (document, question) pairs pre-filled in the UI's examples widget.
examples = [
    [
        """In supervised learning, input data is provided to the model along with the output. In unsupervised learning, only input data is provided to the model. The goal of supervised learning is to train the model so that it can predict the output when it is given new data.""",
        "Explain supervised learning",
    ],
]


# Layout: document + question inputs, a trigger button, and the answer box,
# all in a single row, with clickable examples underneath.
with gr.Blocks() as demo:
    with gr.Row():
        doc_box = gr.Textbox(label='Document', lines=10)
        question_box = gr.Textbox(label='Question', lines=3)
        ask_button = gr.Button('Get Answer')
        answer_box = gr.Textbox(label='Answer', lines=4)
    gr.Examples(examples=examples, inputs=[doc_box, question_box], outputs=answer_box)
    ask_button.click(fn=get_answer, inputs=[doc_box, question_box], outputs=answer_box)

demo.launch()