```python
from llm_setup import llm
from vector_db import get_local_content, check_local_knowledge
from web_scrapping import get_web_content
```
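For reference, the `llm` object imported above only needs to support `llm.invoke()` and expose `response.content`, so any LangChain chat model will do. A minimal `llm_setup.py` might look like the sketch below; the provider, model name, and temperature are assumptions, not part of the original:

```python
# llm_setup.py -- a minimal sketch; the provider and model are assumptions.
from langchain_openai import ChatOpenAI

# Any LangChain chat model works here, since the pipeline only relies on
# llm.invoke() and response.content.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
```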
```python
def generate_final_answer(context, query):
    """Generate final answer using LLM"""
    messages = [
        (
            "system",
            "You are a helpful assistant. Use the provided context to answer the query accurately.",
        ),
        ("system", f"Context: {context}"),
        ("human", query),
    ]
    response = llm.invoke(messages)
    return response.content
```
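`check_local_knowledge` is the routing step: it decides whether the locally indexed documents are relevant enough to answer the query, or whether the pipeline should fall back to the web. A common way to implement this is an LLM yes/no classifier; the sketch below assumes that pattern, and the prompt wording is illustrative rather than the article's exact implementation:

```python
# A hedged sketch of the routing check, assuming an LLM-based yes/no
# classifier; the prompt wording is illustrative.
from llm_setup import llm

def check_local_knowledge(query, context):
    """Return True if the local context appears sufficient to answer the query."""
    prompt = (
        "Answer with exactly 'Yes' or 'No'. "
        "Can the query be answered using only the context below?\n\n"
        f"Context: {context}\n\nQuery: {query}"
    )
    response = llm.invoke([("human", prompt)])
    return response.content.strip().lower().startswith("yes")
```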
```python
def process_query(query, vector_db, local_context):
    """Main function to process user query"""
    print(f"Processing query: {query}")

    # Step 1: Check whether the query can be answered with local knowledge
    can_answer_locally = check_local_knowledge(query, local_context)
    print(f"Can answer locally: {can_answer_locally}")

    # Step 2: Get context from the local vector database or from the web
    if can_answer_locally:
        context = get_local_content(vector_db, query)
        print("Retrieved context from local documents")
    else:
        context = get_web_content(query)
        print("Retrieved context from web scraping")

    # Step 3: Generate the final answer
    answer = generate_final_answer(context, query)
    return answer
```
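Wiring it together, a driver could look like the following. `setup_vector_db` is a hypothetical helper standing in for whatever indexing code the `vector_db` module provides, and the document path and query are illustrative:

```python
# Example driver -- a sketch; setup_vector_db is a hypothetical helper that
# builds the index and a summary of the local documents.
from vector_db import setup_vector_db  # hypothetical: returns (index, context)

if __name__ == "__main__":
    vector_db, local_context = setup_vector_db("data/knowledge_base.pdf")
    answer = process_query("What is agentic RAG?", vector_db, local_context)
    print(answer)
```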