EduLearnAI / noRag.py
mominah's picture
Update noRag.py
8c7a7a0 verified
raw
history blame
2.74 kB
# noRag.py
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from groq import Groq
from pymongo import MongoClient
from config import CONNECTION_STRING, CHATGROQ_API_KEY, CUSTOM_PROMPT
# FastAPI sub-router: every endpoint in this module is mounted under /norag.
router = APIRouter(prefix="/norag", tags=["noRag"])
# Module-level singletons shared by all requests:
# one Groq LLM client and one MongoDB connection pool.
client = Groq(api_key=CHATGROQ_API_KEY)
mongo = MongoClient(CONNECTION_STRING)
db = mongo["edulearnai"]
# One document per chat session: {session_id, history, summary}.
chats = db["chats"]
# Fills the {context} slot of CUSTOM_PROMPT when the final prompt is built.
SYSTEM_PROMPT = "You are a helpful assistant which helps people in their tasks."
class ChatRequest(BaseModel):
    """Request body for POST /norag/chat."""

    # Opaque key used to look up / create the session document in MongoDB.
    session_id: str
    # The user's current message to the assistant.
    question: str
@router.post("/chat", summary="Ask a question to the noRag assistant")
async def chat_endpoint(req: ChatRequest):
# Fetch or create session in MongoDB
doc = chats.find_one({"session_id": req.session_id})
if not doc:
doc = {"session_id": req.session_id, "history": [], "summary": ""}
chats.insert_one(doc)
history, summary = doc["history"], doc["summary"]
# Summarize if history too long
if len(history) >= 10:
msgs = [f"{m['role']}: {m['content']}" for m in history]
combined = summary + "\n" + "\n".join(msgs)
sum_prompt = (
"Summarize the following chat history in one or two short sentences:\n\n"
f"{combined}\n\nSummary:"
)
sum_resp = client.chat.completions.create(
model="meta-llama/llama-4-scout-17b-16e-instruct",
messages=[{"role": "user", "content": sum_prompt}],
temperature=0.3,
max_completion_tokens=150,
top_p=1,
stream=False,
)
summary = sum_resp.choices[0].message.content.strip()
history = []
# Build the prompt for Groq
chat_hist_text = "\n".join(f"{m['role']}: {m['content']}" for m in history)
full_prompt = CUSTOM_PROMPT.format(
context=SYSTEM_PROMPT,
chat_history=chat_hist_text or "(no prior messages)",
question=req.question
)
# Get the assistant’s answer
resp = client.chat.completions.create(
model="meta-llama/llama-4-scout-17b-16e-instruct",
messages=[{"role": "user", "content": full_prompt}],
temperature=1,
max_completion_tokens=1024,
top_p=1,
stream=False,
)
answer = resp.choices[0].message.content.strip()
# Persist the Q&A
history.append({"role": "user", "content": req.question})
history.append({"role": "assistant", "content": answer})
chats.replace_one(
{"session_id": req.session_id},
{"session_id": req.session_id, "history": history, "summary": summary},
upsert=True
)
return {"session_id": req.session_id, "answer": answer, "summary": summary}