# FluentQ/app/agent.py
from models.local_llm import run_llm

# In-memory conversation history; each entry maps a role
# ("user" or "assistant") to the message text.
conversation_memory: list[dict[str, str]] = []

def process_text(input_text: str) -> str:
    """Append the user message, prompt the LLM with the full history, and return its reply."""
    conversation_memory.append({"user": input_text})
    # Format every stored turn; reading only m["user"] would raise
    # KeyError once an assistant entry has been appended.
    context = "\n".join(
        f"{role.capitalize()}: {text}" for m in conversation_memory for role, text in m.items()
    )
    prompt = f"You are a telecom AI assistant. Context:\n{context}\nRespond:"
    response = run_llm(prompt)
    conversation_memory.append({"assistant": response})
    return response
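
# Minimal usage sketch (assumptions: models.local_llm.run_llm is importable
# and returns a string; the example prompts below are illustrative only).
if __name__ == "__main__":
    print(process_text("What does RSRP measure in LTE?"))
    # A second turn exercises the multi-turn context built above.
    print(process_text("How is it different from RSRQ?"))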