import os
from pathlib import Path

import gradio as gr
import lancedb
from dotenv import load_dotenv
from sentence_transformers import SentenceTransformer

# Load environment variables from the .env file
load_dotenv()

print(f"Current Working Directory: {os.getcwd()}")

# Connect to the LanceDB database stored under the working directory
current_working_dir = Path(os.getcwd())
db_path = current_working_dir / ".lancedb"
db = lancedb.connect(db_path)

# Table names for each supported embedding model (fixed-size chunking strategy)
MODEL1_STRATEGY1 = "model1_fixed"
MODEL2_STRATEGY1 = "model2_fixed"
MODEL3_STRATEGY1 = "model3_fixed"

VECTOR_COLUMN = os.getenv("VECTOR_COLUMN", "vector")
TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))

# Embedding model used to encode queries at retrieval time
retriever = SentenceTransformer(os.getenv("EMB_MODEL"))


def get_table_name():
    """Map the configured embedding model to its LanceDB table name."""
    emb_model = os.getenv("EMB_MODEL")
    print(emb_model)
    if emb_model == "sentence-transformers/all-MiniLM-L6-v2":
        return MODEL1_STRATEGY1
    elif emb_model == "BAAI/bge-large-en-v1.5":
        return MODEL2_STRATEGY1
    elif emb_model == "openai/text-embedding-ada-002":
        return MODEL3_STRATEGY1
    else:
        raise ValueError(f"Unsupported embedding model: {emb_model}")


def retrieve(query, k):
    """Return the k text chunks most similar to the query."""
    table_name = get_table_name()
    table = db.open_table(table_name)
    query_vec = retriever.encode(query)
    try:
        documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(k).to_list()
        documents = [doc[TEXT_COLUMN] for doc in documents]
        return documents
    except Exception as e:
        raise gr.Error(str(e))


if __name__ == "__main__":
    res = retrieve("What is transformer?", 4)
    print(res)