# Hugging Face Space file — metadata from the hosting page:
#   author: baadror
#   commit: "Allow to put context as well" (090f39a)
#   size: 880 Bytes
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering
import gradio as gr
# Question-answering model fine-tuned on SQuAD2, exported to ONNX and run
# through ONNX Runtime (the "optimum/..." repo hosts the ONNX export).
model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2")
# Tokenizer is loaded from the original PyTorch checkpoint the export was
# made from; the ONNX repo does not need to ship its own tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
# Standard transformers pipeline, backed by the ONNX model above.
# NOTE: both from_pretrained calls download weights at import time.
onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
def get_answer(question, context):
    """Answer *question* from the given *context* document.

    Parameters
    ----------
    question : str
        The question typed by the user.
    context : str
        The document text to extract the answer from.

    Returns
    -------
    str
        The answer span extracted by the QA pipeline.
    """
    # Both values must arrive as arguments. The previous version read a
    # module-level ``context`` global, which at click time was the
    # gr.Textbox *component* (not its text), so user-supplied context was
    # never actually used by the pipeline.
    pred = onnx_qa(question=question, context=context)["answer"]
    return pred


demo = gr.Blocks()
with demo:
    with gr.Row():
        context = gr.Textbox(label='Document', lines=10)
        question = gr.Textbox(label='Question', lines=3)
    b1 = gr.Button('Get Answer')
    answer = gr.Textbox(label='Answer', lines=4)
    # Wire BOTH textboxes into the handler: Gradio passes each component's
    # current value positionally, so the document text now reaches
    # ``get_answer`` as its ``context`` argument.
    b1.click(fn=get_answer, inputs=[question, context], outputs=answer)

demo.launch()