seansullivan committed on
Commit 878e587 · verified · 1 Parent(s): ddc6dc3

Update app.py

Files changed (1)
  1. app.py +43 -28
app.py CHANGED
@@ -19,6 +19,8 @@ from langgraph.prebuilt import create_react_agent
 
 
 # Show title and description.
+# Add a radio button for mode selection
+mode = st.radio("Select Mode", ["Q/A", "Task"])
 st.title("Coder for NextJS Templates")
 st.markdown(
     "This chatbot connects to a Next.JS Github Repository to answer questions and modify code "
@@ -179,17 +181,14 @@ else:
     else:
         llm = ChatAnthropic(temperature=0, model_name="claude-3-haiku-20240307")
 
-    system_prompt_template = """You are an AI specialized in managing and analyzing a GitHub repository for a Next.js blog website.
+    # Modify the system prompts
+    task_system_prompt_template = """You are an AI specialized in managing and analyzing a GitHub repository for a Next.js blog website.
     Your task is to answer user queries about the repository or execute tasks for modifying it.
-
     Before performing any operation, always use the force_clone_repo tool to ensure you have the latest version of the repository.
-
     Here is all of the code from the repository as well as the file paths for context of how the repo is structured: {REPO_CONTENT}
-
     Given this context, follow this prompt in completing the user's task:
     For user questions, provide direct answers based on the current state of the repository.
     For tasks given by the user, use the available tools and your knowledge of the repo to make necessary changes to the repository.
-
     When making changes, remember to force clone the repository first, make the changes, and then commit and push the changes.
     Available tools:
     1. shell_tool: Execute shell commands
@@ -198,8 +197,13 @@ else:
     4. commit_and_push: Commit and push changes to the repository
     5. read_file: Read content from a specific file in the repository
     When using the write_file tool, always provide both the file_path and the content as separate arguments.
-
     Respond to the human's messages and use tools when necessary to complete tasks. Take a deep breath and think through the task step by step:"""
+
+    qa_system_prompt_template = """You are an AI specialized in analyzing a GitHub repository for a Next.js blog website.
+    Your task is to answer user queries about the repository based on the provided content.
+    Here is all of the code from the repository as well as the file paths for context of how the repo is structured: {REPO_CONTENT}
+    Given this context, provide direct answers to user questions based on the current state of the repository.
+    Take a deep breath and think through the question step by step before answering:"""
 
     from langgraph.checkpoint import MemorySaver
 
@@ -282,23 +286,29 @@ else:
         repo_contents_json = json.dumps(repo_contents, ensure_ascii=False, indent=2)
         st.session_state.REPO_CONTENT = repo_contents_json
         st.success("Repository content refreshed successfully.")
-
-        # Update the system prompt with the new repo content
-        st.session_state.system_prompt = system_prompt_template.format(REPO_CONTENT=st.session_state.REPO_CONTENT)
-
-        # Recreate the graph with the updated system prompt
-        global graph
+
+        # Update the system prompts with the new repo content
+        st.session_state.task_system_prompt = task_system_prompt_template.format(REPO_CONTENT=st.session_state.REPO_CONTENT)
+        st.session_state.qa_system_prompt = qa_system_prompt_template.format(REPO_CONTENT=st.session_state.REPO_CONTENT)
+
+        # Recreate the graphs with the updated system prompts
+        global task_graph, qa_graph
         if st.session_state.use_sonnet and "ANTHROPIC_API_KEY" in os.environ:
             new_llm = ChatAnthropic(temperature=0, model_name="claude-3-5-sonnet-20240620")
         else:
             new_llm = ChatAnthropic(temperature=0, model_name="claude-3-haiku-20240307")
 
-        graph = create_react_agent(
+        task_graph = create_react_agent(
            new_llm,
            tools=tools,
-            messages_modifier=st.session_state.system_prompt,
+            messages_modifier=st.session_state.task_system_prompt,
            checkpointer=memory
        )
+
+        qa_graph = create_react_agent(
+            new_llm,
+            messages_modifier=st.session_state.qa_system_prompt
+        )
 
     if st.session_state.use_sonnet and "ANTHROPIC_API_KEY" in os.environ:
         refresh_repo_data()
@@ -311,26 +321,32 @@ else:
     if "system_prompt" not in st.session_state:
         st.session_state.system_prompt = system_prompt_template.format(REPO_CONTENT=st.session_state.REPO_CONTENT)
 
-    graph = create_react_agent(
+    # Create separate graphs for Task and Q/A modes
+    task_graph = create_react_agent(
        llm,
        tools=tools,
-        messages_modifier=st.session_state.system_prompt,
+        messages_modifier=st.session_state.task_system_prompt,
        checkpointer=memory
    )
-
-    from langchain_core.messages import AIMessage, ToolMessage
-
+
+    qa_graph = create_react_agent(
+        llm,
+        messages_modifier=st.session_state.qa_system_prompt
+    )
+
     async def run_github_editor(query: str, thread_id: str = "default"):
        inputs = {"messages": [HumanMessage(content=query)]}
        config = {
            "configurable": {"thread_id": thread_id},
-            "recursion_limit": 50  # Add this line to set the recursion limit
+            "recursion_limit": 50
        }
-
+
        st.write(f"Human: {query}\n")
-
+
        current_thought = ""
-
+
+        graph = task_graph if mode == "Task" else qa_graph
+
        async for event in graph.astream_events(inputs, config=config, version="v2"):
            kind = event["event"]
            if kind == "on_chat_model_start":
@@ -347,9 +363,9 @@ else:
                    current_thought = ""
                else:
                    st.write(content, end="")
-            elif kind == "on_tool_start":
+            elif kind == "on_tool_start" and mode == "Task":
                st.write(f"\nUsing tool: {event['name']}")
-            elif kind == "on_tool_end":
+            elif kind == "on_tool_end" and mode == "Task":
                st.write(f"Tool result: {event['data']['output']}\n")
 
    # Create a session state variable to store the chat messages. This ensures that the
@@ -368,12 +384,11 @@ else:
 
    # Create a chat input field to allow the user to enter a message. This will display
    # automatically at the bottom of the page.
-    if prompt := st.chat_input("Give me a Task!"):
-
+    if prompt := st.chat_input(f"{'Ask a question' if mode == 'Q/A' else 'Give me a Task'}!"):
        # Store and display the current prompt.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
-
+
        # Generate a response using the custom chatbot logic.
        asyncio.run(run_github_editor(prompt))