import dspy
from dotenv import load_dotenv
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Directory where the Chroma vector store was persisted.
persist_directory = "embeddings_db"

# Configure DSPy to use gpt-4o-mini for generation.
lm = dspy.LM("openai/gpt-4o-mini")
dspy.configure(lm=lm)

# Reopen the persisted Chroma store with the same embedding model that built it.
embedding_function = OpenAIEmbeddings(model="text-embedding-3-small")
vectordb = Chroma(
    persist_directory=persist_directory, embedding_function=embedding_function
)
retriever = vectordb.as_retriever()


def retrieve(inputs):
    """Fetch the documents most similar to the question from the vector store."""
    return retriever.invoke(inputs["question"])


def get_source_pages(docs):
    """Build a deduplicated list of "section - page" labels for citation.

    Assumes each document's metadata["source"] is a path of the form
    ".../<section>/<page>.<ext>".
    """
    source_pages = []
    for doc in docs:
        section = doc.metadata["source"].split("/")[-2]
        page = doc.metadata["source"].split("/")[-1].split(".")[0]
        source_pages.append(f"{section} - {page}")
    return list(set(source_pages))


class COT_RAG(dspy.Module):
    """Chain-of-thought RAG module: retrieve context, then reason to a response."""

    def __init__(self):
        super().__init__()
        self.respond = dspy.ChainOfThought("context, question -> response")

    def forward(self, question):
        # Wrap the raw question with instructions on the source and answer framing.
        question_ = "Given the context from Cory Booker's speech, please answer the question below."
        question_ += f"\n\nQuestion: {question}\n\nStart your answer by specifying this was from Senator Booker."
        docs = retrieve({"question": question})
        self.docs = docs  # keep the retrieved docs so callers can cite sources
        context = [doc.page_content for doc in docs]
        return self.respond(context=context, question=question_)


def answer_question(question):
    """Answer a question end to end with the chain-of-thought RAG pipeline."""
    rag = COT_RAG()
    # Call the module instance (not .forward directly) so DSPy tracks the run.
    answer = rag(question)
    return answer.response
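

# Example usage: a minimal sketch. It assumes the "embeddings_db" Chroma store
# was built beforehand and that OPENAI_API_KEY is set; the sample question is
# illustrative, not part of the original script.
if __name__ == "__main__":
    rag = COT_RAG()
    result = rag("What themes did the speech emphasize?")
    print(result.response)
    # get_source_pages turns the retrieved docs into citable section/page labels.
    print("Sources:", get_source_pages(rag.docs))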