# NOTE(review): removed web-scrape residue (page header, file size, commit
# hash, and line-number gutter) that was not part of the original source.
from llm_setup import llm
from vector_db import get_local_content, check_local_knowledge
from web_scrapping import get_web_content
def generate_final_answer(context, query):
    """Produce the final answer by prompting the LLM with the given context.

    Args:
        context: Text retrieved from local documents or the web, injected
            into a system message so the model grounds its answer on it.
        query: The user's question, sent as the human message.

    Returns:
        The text content of the LLM response.
    """
    system_instruction = (
        "system",
        "You are a helpful assistant. Use the provided context to answer the query accurately.",
    )
    context_message = ("system", f"Context: {context}")
    user_message = ("human", query)

    result = llm.invoke([system_instruction, context_message, user_message])
    return result.content
def process_query(query, vector_db, local_context):
    """Answer a user query, preferring local knowledge over web search.

    Pipeline:
        1. Ask the router (``check_local_knowledge``) whether the query can
           be answered from ``local_context``.
        2. Retrieve supporting context either from the local vector store or,
           as a fallback, from web scraping.
        3. Generate the final answer with the LLM.

    Args:
        query: The user's question.
        vector_db: Vector store queried when the answer is available locally.
        local_context: Summary/sample of local knowledge used by the router
            to decide whether a local answer is possible.

    Returns:
        The LLM-generated answer string.
    """
    print(f"Processing query: {query}")

    # Step 1: Check whether the query can be answered with local knowledge.
    can_answer_locally = check_local_knowledge(query, local_context)
    print(f"Can answer locally: {can_answer_locally}")

    # Step 2: Fetch context from the local vector store or fall back to the web.
    if can_answer_locally:
        context = get_local_content(vector_db, query)
        print("Retrieved context from local documents")
    else:
        context = get_web_content(query)
        print("Retrieved context from web scraping")

    # Step 3: Generate the final answer from the retrieved context.
    answer = generate_final_answer(context, query)
    return answer