import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline


@st.cache_resource
def load_model():
    """Build and cache the FLAN-T5 text2text-generation pipeline.

    st.cache_resource ensures the tokenizer/model are loaded once per
    Streamlit server process, not on every script rerun.
    """
    model_id = "google/flan-t5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)


# Module-level singleton; safe because load_model() is cached.
generator = load_model()


def ask_question(question, df):
    """Answer a natural-language question about the tabular data in *df*.

    Parameters
    ----------
    question : str
        The user's question about the table.
    df : pandas.DataFrame
        Table whose CSV serialization is embedded in the prompt.

    Returns
    -------
    str
        The model's generated answer, stripped of surrounding whitespace.
    """
    csv_data = df.to_csv(index=False)
    prompt = (
        f"Given the following table data: {csv_data} "
        f"Answer this question: {question}"
    )
    # truncation=True: FLAN-T5's encoder input is capped (512 tokens), and a
    # whole-table CSV easily exceeds that. Without truncation the tokenizer
    # emits an over-length sequence and the call errors or misbehaves.
    # NOTE(review): rows beyond the input window are silently dropped from the
    # model's view — consider sampling/summarizing large tables upstream.
    result = generator(prompt, max_new_tokens=256, truncation=True)[0][
        "generated_text"
    ]
    return result.strip()