DrishtiSharma committed on
Commit
9fdd9f4
·
verified ·
1 Parent(s): 88b9564

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -21
app.py CHANGED
@@ -36,30 +36,34 @@ def format_doc(doc: Document) -> str:
36
  return f"Document_Title: {doc.metadata.get('title', 'Unknown')}\nPage: {doc.metadata.get('page', 'Unknown')}\nContent: {doc.page_content}"
37
 
38
  # Extract relevant context function
39
- def extract_relevant_context(question, documents):
40
  result = []
41
- with st.spinner("🔍 Extracting relevant content from document..."):
42
- for doc in documents:
43
- formatted_documents = format_doc(doc)
44
- system_prompt = f"{REAG_SYSTEM_PROMPT}\n\n# Available source\n\n{formatted_documents}"
45
- prompt = f"""Determine if the 'Available source' content is sufficient to answer the QUESTION.
46
- QUESTION: {question}
47
- RESPONSE FORMAT (Strict JSON):
 
48
  ```json
49
- {{
50
- "content": "Extracted relevant content",
51
- "reasoning": "Why this was chosen",
52
- "is_irrelevant": false
53
- }}
54
  ```
55
- """
56
- messages = [{"role": "system", "content": system_prompt},
57
- {"role": "user", "content": prompt}]
58
- response = llm_relevancy.invoke(messages)
59
- formatted_response = relevancy_parser.parse(response.content)
60
- result.append(formatted_response)
61
-
62
- final_context = [item['content'] for item in result if not item['is_irrelevant']]
 
 
 
 
63
  return final_context
64
 
65
  # Generate response using RAG Prompt
 
36
  return f"Document_Title: {doc.metadata.get('title', 'Unknown')}\nPage: {doc.metadata.get('page', 'Unknown')}\nContent: {doc.page_content}"
37
 
38
  # Extract relevant context function
39
+ def extract_relevant_context(question,documents):
40
  result = []
41
+ for doc in documents:
42
+ formatted_documents = format_doc(doc)
43
+ system = f"{REAG_SYSTEM_PROMPT}\n\n# Available source\n\n{formatted_documents}"
44
+ prompt = f"""Determine if the 'Avaiable source' content supplied is sufficient and relevant to ANSWER the QUESTION asked.
45
+ QUESTION: {question}
46
+ #INSTRUCTIONS TO FOLLOW
47
+ 1. Analyze the context provided thoroughly to check its relevancy to help formulizing a response for the QUESTION asked.
48
+ 2, STRICTLY PROVIDE THE RESPONSE IN A JSON STRUCTURE AS DESCRIBED BELOW:
49
  ```json
50
+ {{"content":<<The page content of the document that is relevant or sufficient to answer the question asked>>,
51
+ "reasoning":<<The reasoning for selecting The page content with respect to the question asked>>,
52
+ "is_irrelevant":<<Specify 'True' if the content in the document is not sufficient or relevant.Specify 'False' if the page content is sufficient to answer the QUESTION>>
53
+ }}
 
54
  ```
55
+ """
56
+ messages =[ {"role": "system", "content": system},
57
+ {"role": "user", "content": prompt},
58
+ ]
59
+ response = llm_relevancy.invoke(messages)
60
+ print(response.content)
61
+ formatted_response = relevancy_parser.parse(response.content)
62
+ result.append(formatted_response)
63
+ final_context = []
64
+ for items in result:
65
+ if (items['is_irrelevant'] == False) or ( items['is_irrelevant'] == 'false') or (items['is_irrelevant'] == 'False'):
66
+ final_context.append(items['content'])
67
  return final_context
68
 
69
  # Generate response using RAG Prompt