import gradio as gr
import pandas as pd
from transformers import BartForConditionalGeneration, TapexTokenizer

# Load the TAPEX tokenizer and (BART-based) model fine-tuned on WikiTableQuestions (WTQ)
tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-large-finetuned-wtq")
model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-large-finetuned-wtq")

# Example table of Olympic host cities that the app answers questions about
data = {
    "year": [1896, 1900, 1904, 2004, 2008, 2012],
    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"],
}
table = pd.DataFrame.from_dict(data)


def answer_question(question):
    # Encode the table together with the natural-language query;
    # the TAPEX tokenizer flattens the table into a token sequence
    encoding = tokenizer(table=table, query=question, return_tensors="pt")
    # Generate the answer and decode it back to plain text
    outputs = model.generate(**encoding)
    # batch_decode returns a list with one string per input, so take the first entry
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].strip()
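
# Optional sanity check without launching the web UI (an illustrative query for
# the table above; any natural-language question about it works):
# print(answer_question("In which year did beijing host the Olympic Games?"))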

# Expose the model through a simple text-in / text-out Gradio interface
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
# share=True serves the app locally and also creates a temporary public link
iface.launch(share=True)