pradeepsengarr committed (verified)
Commit 920b3d6 · 1 Parent(s): dea11f3

Update app.py

Files changed (1):
  1. app.py +14 -2
app.py CHANGED
@@ -498,9 +498,22 @@ def process_answer(question, full_text, retriever):
    summary = llm(prompt)  # Uses the LLM to generate a summary
    return summary

+   # --- Prompt Engineering ---
+   # Let's modify how we ask the model to answer
+   prompt = f"""
+   Given the following text, answer the question with a simple and direct 'Yes' or 'No' followed by a brief explanation.
+
+   Text: {full_text[:3000]}
+
+   Question: {question}
+   Answer:
+   """
+
    # Use RetrievalQA for general queries
    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
-   return qa_chain.run(question)  # This is the main answer generation with retrieval
+   response = qa_chain.run(question)
+
+   return response

# --- UI Layout ---
with st.sidebar:
@@ -545,4 +558,3 @@ if uploaded_file:
    st.error("⚠️ No text could be extracted from the PDF. Try another file.")
else:
    st.info("Upload a PDF to begin.")
-
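For context, RetrievalQA builds its answer from its chain's own prompt over the retrieved chunks, so a free-standing f-string like the one added above is not automatically used by qa_chain.run(question). Below is a minimal sketch, not part of this commit, of the standard LangChain way to attach such a Yes/No template to RetrievalQA via chain_type_kwargs; llm, retriever, and question stand in for the app's existing objects, and the template text mirrors the prompt added in the diff.

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# Template mirroring the Yes/No prompt from the commit; {context} receives the
# retrieved document chunks and {question} receives the user's question.
yes_no_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=(
        "Given the following text, answer the question with a simple and direct "
        "'Yes' or 'No' followed by a brief explanation.\n\n"
        "Text: {context}\n\n"
        "Question: {question}\n"
        "Answer:"
    ),
)

# chain_type="stuff" concatenates the retrieved chunks into {context};
# chain_type_kwargs overrides the chain's default prompt with the one above.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,                # the app's existing LLM wrapper (assumed)
    retriever=retriever,    # the app's existing retriever (assumed)
    chain_type="stuff",
    chain_type_kwargs={"prompt": yes_no_prompt},
)

response = qa_chain.run(question)  # the question is interpolated into {question}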