seansullivan committed
Commit e90009f · verified · 1 Parent(s): 03550bb

Update app.py

Files changed (1):
  1. app.py +27 -14
app.py CHANGED
@@ -10,6 +10,7 @@ from langchain_community.retrievers import PineconeHybridSearchRetriever
 from langchain.tools.retriever import create_retriever_tool
 from langgraph.prebuilt import create_react_agent
 
+LANGCHAIN_API_KEY = os.environ['LANGCHAIN_API_KEY']
 
 os.environ['LANGCHAIN_TRACING_V2'] = "true"
 
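This hunk reads the LangSmith key so the tracing enabled just below it can authenticate. Note that `os.environ['LANGCHAIN_API_KEY']` raises a bare `KeyError` when the Space secret is missing; a minimal defensive sketch (not part of this commit, and the warning text is illustrative):

```python
import os
import streamlit as st

# Hypothetical guard: surface a readable message instead of a KeyError
# when the LANGCHAIN_API_KEY secret is not configured in the environment.
LANGCHAIN_API_KEY = os.environ.get('LANGCHAIN_API_KEY')
if LANGCHAIN_API_KEY:
    os.environ['LANGCHAIN_TRACING_V2'] = "true"  # enable LangSmith tracing
else:
    st.warning("LANGCHAIN_API_KEY is not set; LangSmith tracing is disabled.")
```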
@@ -197,7 +198,7 @@ def init_agent(namespace1: str, top_k1: int, namespace2: str, top_k2: int):
 st.sidebar.header("Retriever Tool Settings")
 
 # Dropdown and slider for Retriever Tool 1 (empty option available)
-namespace_options = ["langgraph-main", "autogen", ""]
+namespace_options = ["langgraph-main", "autogen", "llm-cli", ""]
 namespace1 = st.sidebar.selectbox("Select namespace for Retriever Tool 1:", namespace_options, index=0)
 top_k1 = st.sidebar.slider("Select top_k for Retriever Tool 1:", min_value=1, max_value=4, value=1, step=1)
 
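This hunk only adds "llm-cli" to the sidebar options; the selected namespace and top_k are passed into init_agent, which builds the hybrid retriever. A rough sketch of that wiring, under assumed names that are not in this diff (make_retriever_tool and its embeddings/sparse_encoder/index parameters are hypothetical):

```python
from langchain.tools.retriever import create_retriever_tool
from langchain_community.retrievers import PineconeHybridSearchRetriever

def make_retriever_tool(namespace: str, top_k: int, index, embeddings, sparse_encoder):
    # Hybrid (dense + sparse) retrieval scoped to one Pinecone namespace.
    retriever = PineconeHybridSearchRetriever(
        embeddings=embeddings,
        sparse_encoder=sparse_encoder,
        index=index,
        namespace=namespace,  # e.g. the "llm-cli" option this hunk adds
        top_k=top_k,
    )
    # Wrap the retriever as a tool the ReAct agent can call.
    return create_retriever_tool(
        retriever,
        name=f"search_{namespace}",
        description=f"Searches the '{namespace}' namespace of the Pinecone index.",
    )
```

An empty namespace selection presumably means the corresponding tool is skipped.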
@@ -242,40 +243,52 @@ with st.form("chat_form", clear_on_submit=True):
 if st.session_state.chat_history and st.session_state.chat_history[-1][0] == "user":
     inputs = {"messages": st.session_state.chat_history}
 
-    # Create separate placeholders for tool output and final answer.
+    # Create separate placeholders for each section.
+    tool_calls_placeholder = st.empty()
     tool_output_placeholder = st.empty()
     final_answer_placeholder = st.empty()
 
     # Accumulators for each section.
+    tool_calls_text = ""
     tool_output_text = ""
     final_answer_text = ""
 
     # Stream the agent's response chunk-by-chunk.
     for s in graph.stream(inputs, stream_mode="values"):
-        # Get the last message in the chunk.
+        # Extract the last message from the current chunk.
         message = s["messages"][-1]
 
         if isinstance(message, tuple):
-            # This is a tool output message (e.g., retrieved docs or tool usage).
+            # This is a tool-related message.
+            # We use a simple heuristic: if the text contains "call_" (case-insensitive), we treat it as a tool call.
             role, text = message
-            tool_output_text += text
-            # Update the tool output area with a heading and the accumulated text.
-            tool_output_placeholder.markdown(
-                f"### Tool Output\n\n{tool_output_text}",
-                unsafe_allow_html=True
-            )
+            if "call_" in text.lower():
+                tool_calls_text += text + "\n\n"
+                tool_calls_placeholder.markdown(
+                    f"### Tool Calls\n\n{tool_calls_text}",
+                    unsafe_allow_html=True
+                )
+            else:
+                tool_output_text += text + "\n\n"
+                tool_output_placeholder.markdown(
+                    f"### Tool Output\n\n{tool_output_text}",
+                    unsafe_allow_html=True
+                )
         else:
             # This is the final answer generated by the AI.
             text = message.content
-            final_answer_text += text
-            # Update the final answer area with a heading and the accumulated text.
+            final_answer_text += text + "\n\n"
             final_answer_placeholder.markdown(
                 f"### Final Answer\n\n{final_answer_text}",
                 unsafe_allow_html=True
             )
 
-    # Once complete, combine both sections into the chat history.
-    combined_response = f"**Tool Output:**\n\n{tool_output_text}\n\n**Final Answer:**\n\n{final_answer_text}"
+    # Once complete, combine all sections into one record for persistence.
+    combined_response = (
+        f"**Tool Calls:**\n\n{tool_calls_text}\n\n"
+        f"**Tool Output:**\n\n{tool_output_text}\n\n"
+        f"**Final Answer:**\n\n{final_answer_text}"
+    )
     st.session_state.chat_history.append(("assistant", combined_response))
 
 
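Because graph.stream(..., stream_mode="values") yields the full state after each step, s["messages"][-1] is always the newest message, so each chunk is routed exactly once. The "call_" substring check is a heuristic: it keys off OpenAI-style tool-call ids (which start with "call_"), so retrieved text that happens to contain "call_" would be misrouted to the Tool Calls pane. A standalone check of the routing, with invented sample texts:

```python
# Mirror of the commit's routing heuristic; the sample texts are illustrative,
# not taken from actual traces.
samples = [
    ("tool", "Invoking retriever with id call_AbC123"),        # looks like a tool call
    ("tool", "Retrieved 2 docs about LangGraph checkpoints"),  # looks like tool output
]
for role, text in samples:
    section = "Tool Calls" if "call_" in text.lower() else "Tool Output"
    print(f"{role} message -> {section}")
```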