import lancedb
import os
import gradio as gr
from sentence_transformers import SentenceTransformer
from pathlib import Path
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()

print(f"Current Working Directory: {os.getcwd()}")

# Connect to the LanceDB database
current_working_dir = Path(os.getcwd())
db_path = current_working_dir / ".lancedb"
db = lancedb.connect(db_path)

# Table names, one per supported embedding model
MODEL1_STRATEGY1 = "model1_fixed"
MODEL2_STRATEGY1 = "model2_fixed"
MODEL3_STRATEGY1 = "model3_fixed"

# Column names and batch size, overridable via environment variables
VECTOR_COLUMN = os.getenv("VECTOR_COLUMN", "vector")
TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))

# Embedding model used to encode queries at retrieval time
retriever = SentenceTransformer(os.getenv("EMB_MODEL"))
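
# Illustrative .env contents (a sketch only; the variable names come from the
# os.getenv() calls above, the values are example defaults, not this Space's
# actual configuration):
#
#   EMB_MODEL=sentence-transformers/all-MiniLM-L6-v2
#   VECTOR_COLUMN=vector
#   TEXT_COLUMN=text
#   BATCH_SIZE=32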

def get_table_name():
    """Map the configured embedding model to its LanceDB table name."""
    emb_model = os.getenv("EMB_MODEL")
    print(emb_model)
    if emb_model == "sentence-transformers/all-MiniLM-L6-v2":
        return MODEL1_STRATEGY1
    elif emb_model == "BAAI/bge-large-en-v1.5":
        return MODEL2_STRATEGY1
    elif emb_model == "openai/text-embedding-ada-002":
        return MODEL3_STRATEGY1
    else:
        raise ValueError(f"Unsupported embedding model: {emb_model}")

def retrieve(query, k):
    """Embed the query and return the top-k matching text chunks from LanceDB."""
    table_name = get_table_name()
    table = db.open_table(table_name)
    query_vec = retriever.encode(query)
    try:
        documents = (
            table.search(query_vec, vector_column_name=VECTOR_COLUMN)
            .limit(k)
            .to_list()
        )
        documents = [doc[TEXT_COLUMN] for doc in documents]
        return documents
    except Exception as e:
        raise gr.Error(str(e))

if __name__ == "__main__":
    # Quick smoke test when the module is run directly
    res = retrieve("What is transformer?", 4)
    print(res)
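

# --- Illustrative sketch (not part of the original script): wiring `retrieve`
# into a minimal Gradio UI, since gradio is already imported above. The
# function name, labels, and component choices are assumptions for
# demonstration, not taken from this Space.
def build_demo():
    demo = gr.Interface(
        fn=lambda query, k: retrieve(query, int(k)),  # slider values arrive as floats
        inputs=[
            gr.Textbox(label="Query"),
            gr.Slider(1, 10, value=4, step=1, label="Top k"),
        ],
        outputs=gr.JSON(label="Retrieved chunks"),
        title="LanceDB retriever",
    )
    return demo

# To serve the UI instead of the smoke test above, one could call:
#   build_demo().launch()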