Update app.py
app.py CHANGED
@@ -36,7 +36,7 @@ with st.sidebar:
 
     system_message = st.text_area(
         "System Message",
-        value="You are a friendly chatbot created by
+        value="You are a friendly chatbot created by who Provide clear, accurate, and brief answers. Keep responses polite, engaging, and to the point. If unsure, politely suggest alternatives.",
         height=100
     )
 
@@ -114,6 +114,8 @@ for message in st.session_state.messages:
 
 # Handle input and PDF processing
 uploaded_file = st.file_uploader("Upload PDF", type="pdf", accept_multiple_files=False)
+documents = None  # Initialize the documents variable
+
 if uploaded_file:
     documents = process_pdf(uploaded_file)
     context = "\n\n".join([doc.page_content for doc in documents])
@@ -174,8 +176,8 @@ if uploaded_file:
         st.error(f"Application Error: {str(e)}")
 
 # Allow user to ask a question based on extracted PDF content
-if
-if
+if uploaded_file and documents:  # Ensure documents exist before proceeding
+    if prompt := st.chat_input("Ask a question about the PDF content"):
         context = "\n\n".join([doc.page_content for doc in documents])  # Get context from documents
         answer = generate_response_with_langchain(prompt, context)
 
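For context, here is a minimal, self-contained sketch of the upload-then-ask flow this commit arrives at: initialize documents up front, process the PDF once a file is uploaded, and only show the chat input when both the upload and the processed documents exist. The process_pdf and generate_response_with_langchain bodies below are placeholder stand-ins for the helpers referenced in the diff, not the Space's actual implementation.

# Sketch of the guarded PDF question-answer flow (placeholder helpers)
import streamlit as st


def process_pdf(uploaded_file):
    """Placeholder: the real helper extracts the PDF into LangChain-style documents."""
    class Doc:
        def __init__(self, page_content):
            self.page_content = page_content
    return [Doc(uploaded_file.read().decode("latin-1", errors="ignore"))]


def generate_response_with_langchain(prompt, context):
    """Placeholder: the real helper queries the LLM with the prompt and PDF context."""
    return f"(stub) You asked {prompt!r} against {len(context)} characters of context."


uploaded_file = st.file_uploader("Upload PDF", type="pdf", accept_multiple_files=False)
documents = None  # Initialize so the guard below never hits an undefined name

if uploaded_file:
    documents = process_pdf(uploaded_file)

# Only offer the chat box once a PDF has been uploaded and processed
if uploaded_file and documents:
    if prompt := st.chat_input("Ask a question about the PDF content"):
        context = "\n\n".join(doc.page_content for doc in documents)
        answer = generate_response_with_langchain(prompt, context)
        st.write(answer)

Initializing documents to None before the if uploaded_file block is what lets the later guard check it safely on the first render, before any file has been uploaded.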