File size: 651 Bytes
53848b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import streamlit as st

@st.cache_resource
def load_model():
    """Build a FLAN-T5 text2text-generation pipeline.

    Decorated with ``st.cache_resource`` so the tokenizer/model download and
    construction happen once per Streamlit server process, not on every rerun.
    """
    checkpoint = "google/flan-t5-base"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    seq2seq = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    return pipeline("text2text-generation", model=seq2seq, tokenizer=tok)

# Module-level pipeline instance shared by ask_question(); load_model() is
# cached via st.cache_resource, so this is cheap on Streamlit reruns.
generator = load_model()

def ask_question(question, df):
    """Answer a natural-language question about a DataFrame via the LLM.

    Args:
        question: The user's question as a plain string.
        df: pandas DataFrame whose full contents are serialized to CSV and
            embedded in the prompt (large frames may exceed the model's
            context window — TODO: confirm/limit row count upstream).

    Returns:
        The model's generated answer, stripped of surrounding whitespace.
    """
    csv_data = df.to_csv(index=False)
    # BUG FIX: the original f-string literal spanned multiple physical lines
    # without triple quotes, which is a SyntaxError. Rebuilt as a valid
    # multiline literal with identical runtime content.
    prompt = (
        f"Given the following table data:\n"
        f"{csv_data}\n"
        f"\n"
        f"Answer this question:\n"
        f"{question}"
    )
    result = generator(prompt, max_new_tokens=256)[0]["generated_text"]
    return result.strip()