baadror's picture
added app
6e85301
raw
history blame
810 Bytes
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering
import gradio as gr
# Load an ONNX-exported RoBERTa QA model (optimum export of deepset/roberta-base-squad2)
# and the matching tokenizer, then wrap both in a transformers QA pipeline.
# NOTE: this downloads model weights on first run — requires network access.
model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2")
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Fixed context the demo answers questions about. (A previous dead
# `question = ...` constant was removed: the user's question comes from
# the Gradio Textbox, never from a module constant.)
context = "My name is Philipp and I live in Nuremberg."
def get_answer(question, context=context):
    """Answer *question* from *context* using the ONNX QA pipeline.

    Args:
        question: The question string typed by the user.
        context: Passage to extract the answer from; defaults to the
            module-level demo context, so existing single-argument
            callers (the Gradio click handler) are unaffected.

    Returns:
        The extracted answer string. (Previously the whole pipeline
        result dict was returned, which made the output Textbox show a
        raw dict repr like ``{'score': ..., 'answer': ...}``.)
    """
    # Keyword arguments make the pipeline call unambiguous.
    pred = onnx_qa(question=question, context=context)
    return pred["answer"]
# Build the Gradio UI: a question box, a trigger button, and an answer box.
demo = gr.Blocks()
# BUG FIX: the original used `with demo():`, which *calls* the Blocks
# instance and raises a TypeError at startup; the context manager is the
# instance itself (`with demo:`).
with demo:
    with gr.Row():
        question = gr.Textbox(label='Question', lines=3)
        b1 = gr.Button('Get Answer')
        answer = gr.Textbox(label='Answer', lines=4)
    # Event listeners must be registered inside the Blocks context.
    b1.click(fn=get_answer, inputs=question, outputs=answer)
demo.launch()