CultriX committed on
Commit 69af1c5 · verified · 1 Parent(s): ffea40d

Update app.py

Files changed (1)
  1. app.py +188 -125
app.py CHANGED
@@ -6,28 +6,27 @@ import threading
  import queue
  import gradio as gr
  import httpx
- from typing import Generator, Any, Dict, List

  # -------------------- Configuration --------------------
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

  # -------------------- External Model Call --------------------
- async def call_model(prompt: str, model: str = "gpt-4o-mini", api_key: str = None) -> str:
      """
-     Sends a prompt to the OpenAI API endpoint using the specified model (overridden to gpt-4o-mini)
-     and returns the generated response.
      """
-     # Use the provided API key or fall back to the environment variable
      if api_key is None:
          api_key = os.getenv("OPENAI_API_KEY")
      url = "https://api.openai.com/v1/chat/completions"
      headers = {
          "Authorization": f"Bearer {api_key}",
          "Content-Type": "application/json"
      }
-     # Override the model value to always be "gpt-4o-mini"
      payload = {
-         "model": "gpt-4o-mini",
          "messages": [{"role": "user", "content": prompt}],
      }
      async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client:
@@ -39,191 +38,214 @@ async def call_model(prompt: str, model: str = "gpt-4o-mini", api_key: str = Non
  # -------------------- Agent Classes --------------------
  class PromptOptimizerAgent:
      async def optimize_prompt(self, user_prompt: str, api_key: str) -> str:
-         """
-         Optimizes the user's initial prompt according to the following instructions:
-         >>> Given the user's initial prompt below the ### characters please enhance it.
-         1. Start with clear, precise instructions placed at the beginning of the prompt.
-         2. Include specific details about the desired context, outcome, length, format, and style.
-         3. Provide examples of the desired output format, if possible.
-         4. Use appropriate leading words or phrases to guide the desired output, especially if code generation is involved.
-         5. Avoid any vague or imprecise language.
-         6. Rather than only stating what not to do, provide guidance on what should be done instead.
-         Remember to ensure the revised prompt remains true to the user's original intent. <<<
-         ###User initial prompt below ###
-         """
          system_prompt = (
-             "Given the user's initial prompt below the ### characters please enhance it. "
-             "1. Start with clear, precise instructions placed at the beginning of the prompt. "
-             "2. Include specific details about the desired context, outcome, length, format, and style. "
-             "3. Provide examples of the desired output format, if possible. "
-             "4. Use appropriate leading words or phrases to guide the desired output, especially if code generation is involved. "
-             "5. Avoid any vague or imprecise language. "
-             "6. Rather than only stating what not to do, provide guidance on what should be done instead. "
-             "Remember to ensure the revised prompt remains true to the user's original intent. "
-             "###User initial prompt ###"
          )
-         full_prompt = f"{system_prompt}\n{user_prompt}\n<<<"
-         optimized = await call_model(full_prompt, api_key=api_key)
          return optimized

  class OrchestratorAgent:
-     def __init__(self, log_queue: queue.Queue) -> None:
          self.log_queue = log_queue

-     async def generate_plan(self, task: str, api_key: str) -> str:
          """
-         Generates a detailed, step-by-step plan for completing the given task.
          """
          prompt = (
-             f"You are an orchestrator agent. The user has provided the task: '{task}'.\n"
-             "Generate a detailed, step-by-step plan for completing this task by coordinating a coder agent, "
-             "a code reviewer agent, and a documentation agent. List the steps as bullet points."
          )
-         plan = await call_model(prompt, api_key=api_key)
          return plan

  class CoderAgent:
-     async def generate_code(self, instructions: str, api_key: str) -> str:
-         """
-         Generates code based on the given instructions.
-         """
          prompt = (
-             "You are a coder agent. Based on the following instructions, generate the requested code. "
-             "Only output the generated code, never any explanations or any other information besides the actual code!\n"
-             f"{instructions}\n"
          )
-         code = await call_model(prompt, api_key=api_key)
          return code

  class CodeReviewerAgent:
      async def review_code(self, code: str, task: str, api_key: str) -> str:
-         """
-         Reviews the provided code to check if it meets the task specifications.
-         NEVER generate any code yourself! Respond only with feedback or with 'APPROVE' if everything is correct.
-         """
          prompt = (
-             "You are a code reviewing agent highly skilled in evaluating code quality. "
-             "Review the provided code and check if it meets the task specifications and properly addresses any provided feedback. "
-             "NEVER generate any code yourself! Respond only with feedback or with 'APPROVE' if everything is correct. "
-             "Do not mention 'APPROVE' before actually approving! Do not request documentation or user guides:\n"
-             f"Task: {task}\n"
-             f"Code:\n{code}\n\n"
          )
-         review = await call_model(prompt, api_key=api_key)
          return review

  class DocumentationAgent:
      async def generate_documentation(self, code: str, api_key: str) -> str:
-         """
-         Generates clear and concise documentation for the approved code,
-         including a brief and concise --help message.
-         """
          prompt = (
-             "You are a documentation agent. Generate a brief, clear and concise documentation for the following approved code. "
-             "Keep it short and compact, focusing on the main elements, do not include unnecessary extras that limit readability. "
-             "Additionally, generate a brief and concise --help message for the code:\n"
-             f"{code}\n"
-             "Briefly explain what the code does and how it works. Make sure to be clear and concise, do not include unnecessary extras that limit readability."
          )
-         documentation = await call_model(prompt, api_key=api_key)
          return documentation

  # -------------------- Multi-Agent Conversation --------------------
- async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, api_key: str) -> None:
      """
-     Conducts a multi-agent conversation where each agent's response is generated via the external model API.
-     The conversation is logged to the provided queue.
      """
-     conversation: List[Dict[str, str]] = []  # List to store each agent's message

-     # Step 0: Use Prompt Optimizer to enhance the user's initial prompt.
-     log_queue.put("[Prompt Optimizer]: Received initial task. Optimizing prompt...")
      prompt_optimizer = PromptOptimizerAgent()
      optimized_task = await prompt_optimizer.optimize_prompt(task_message, api_key=api_key)
      conversation.append({"agent": "Prompt Optimizer", "message": f"Optimized Task:\n{optimized_task}"})
      log_queue.put(f"[Prompt Optimizer]: Optimized task prompt:\n{optimized_task}")

-     # Step 1: Orchestrator generates a plan based on the optimized task.
-     log_queue.put("[Orchestrator]: Received optimized task. Generating plan...")
-     orchestrator = OrchestratorAgent(log_queue)
      plan = await orchestrator.generate_plan(optimized_task, api_key=api_key)
      conversation.append({"agent": "Orchestrator", "message": f"Plan:\n{plan}"})
      log_queue.put(f"[Orchestrator]: Plan generated:\n{plan}")

-     # Step 2: Coder generates code based on the plan.
      coder = CoderAgent()
-     coder_instructions = f"Implement the task as described in the following plan:\n{plan}"
-     log_queue.put("[Coder]: Received coding task from the Orchestrator.")
      code = await coder.generate_code(coder_instructions, api_key=api_key)
      conversation.append({"agent": "Coder", "message": f"Code:\n{code}"})
      log_queue.put(f"[Coder]: Code generated:\n{code}")

-     # Step 3: Code Reviewer reviews the generated code.
      reviewer = CodeReviewerAgent()
      approval_keyword = "approve"
      revision_iteration = 0
      while True:
-         if revision_iteration == 0:
-             log_queue.put("[Code Reviewer]: Starting review of the generated code...")
-         else:
-             log_queue.put(f"[Code Reviewer]: Reviewing the revised code (Iteration {revision_iteration})...")
-
          review = await reviewer.review_code(code, optimized_task, api_key=api_key)
          conversation.append({"agent": "Code Reviewer", "message": f"Review (Iteration {revision_iteration}):\n{review}"})
-         log_queue.put(f"[Code Reviewer]: Review feedback (Iteration {revision_iteration}):\n{review}")

-         # Check if the code has been approved
          if approval_keyword in review.lower():
              log_queue.put("[Code Reviewer]: Code approved.")
-             break  # Exit the loop if approved

-         # If not approved, increment the revision count.
          revision_iteration += 1
-
-         # Kill-switch: After 5 generations without approval, shut down.
          if revision_iteration >= 5:
-             log_queue.put("Unable to solve your task to full satisfaction :(")
-             sys.exit("Unable to solve your task to full satisfaction :(")

-         # If under the limit, instruct the coder to revise the code.
-         log_queue.put(f"[Orchestrator]: Code not approved. Instructing coder to revise the code (Iteration {revision_iteration}).")
-         update_instructions = f"Please revise the code according to the following feedback. Feedback: {review}"
-         revised_code = await coder.generate_code(update_instructions, api_key=api_key)
          conversation.append({"agent": "Coder", "message": f"Revised Code (Iteration {revision_iteration}):\n{revised_code}"})
-         log_queue.put(f"[Coder]: Revised code submitted (Iteration {revision_iteration}):\n{revised_code}")
-         code = revised_code  # Update the code for the next review iteration

-     # Step 4: Documentation Agent generates documentation for the approved code.
      doc_agent = DocumentationAgent()
-     log_queue.put("[Documentation Agent]: Generating documentation for the approved code.")
      documentation = await doc_agent.generate_documentation(code, api_key=api_key)
      conversation.append({"agent": "Documentation Agent", "message": f"Documentation:\n{documentation}"})
      log_queue.put(f"[Documentation Agent]: Documentation generated:\n{documentation}")

-     log_queue.put("Multi-agent conversation complete.")
      log_queue.put(("result", conversation))

- # -------------------- Process Generator for Streaming --------------------
- def process_conversation_generator(task_message: str, api_key: str) -> Generator[str, None, None]:
      """
-     Wraps the asynchronous multi-agent conversation and yields log messages as they are generated.
      """
      log_q: queue.Queue = queue.Queue()

      def run_conversation() -> None:
-         asyncio.run(multi_agent_conversation(task_message, log_q, api_key))

      thread = threading.Thread(target=run_conversation)
      thread.start()

      final_result = None
-     # Yield log messages as long as the thread is running or the queue is not empty.
      while thread.is_alive() or not log_q.empty():
          try:
              msg = log_q.get(timeout=0.1)
              if isinstance(msg, tuple) and msg[0] == "result":
                  final_result = msg[1]
-                 yield "Final conversation complete."
              else:
                  yield msg
          except queue.Empty:
@@ -231,39 +253,80 @@ def process_conversation_generator(task_message: str, api_key: str) -> Generator
      thread.join()
      if final_result:
-         # Format the final conversation log.
-         conv_text = "\n========== Multi-Agent Conversation ==========\n"
          for entry in final_result:
              conv_text += f"[{entry['agent']}]: {entry['message']}\n\n"
          yield conv_text

  # -------------------- Chat Function for Gradio --------------------
  def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = None) -> Generator[str, None, None]:
-     """
-     Chat function for Gradio.
-     The user's message is interpreted as the task description.
-     An optional OpenAI API key can be provided via the additional input; if not provided, the environment variable is used.
-     This function streams the multi-agent conversation log messages.
-     """
      if not openai_api_key:
          openai_api_key = os.getenv("OPENAI_API_KEY")
-     yield from process_conversation_generator(message, openai_api_key)

  # -------------------- Launch the Chatbot --------------------
- # Use Gradio's ChatInterface with an additional input field for the OpenAI API key.
  iface = gr.ChatInterface(
      fn=multi_agent_chat,
      additional_inputs=[gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")],
-     type="messages",
-     title="Actual Multi-Agent Conversation Chatbot",
      description="""
-     - Collaborative workflow between Prompt Enhancer, Orchestrator, Coder, Code-Reviewer and Documentation Generator agents.
-     - Enter a task and observe as your prompt gets magically solved! :)
-     - NOTE: The full conversation log will be displayed at the end, showing all the steps taken!
-     - NOTE2: If the Coder is unable to satisfactorily complete the task after five attempts, the script will terminate to prevent endless iterations.
-     - NOTE3: You will have to input your OPENAI_API_KEY at the bottom of the page for this to work!
      """
  )

  if __name__ == "__main__":
-     iface.launch()

  import queue
  import gradio as gr
  import httpx
+ from typing import Generator, Any, Dict, List, Optional

  # -------------------- Configuration --------------------
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

  # -------------------- External Model Call --------------------
+ async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None) -> str:
      """
+     Sends a prompt to the OpenAI API endpoint.
      """
      if api_key is None:
          api_key = os.getenv("OPENAI_API_KEY")
+     if api_key is None:
+         raise ValueError("OpenAI API key not found.")
      url = "https://api.openai.com/v1/chat/completions"
      headers = {
          "Authorization": f"Bearer {api_key}",
          "Content-Type": "application/json"
      }
      payload = {
+         "model": model,
          "messages": [{"role": "user", "content": prompt}],
      }
      async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client:
  # -------------------- Agent Classes --------------------
  class PromptOptimizerAgent:
      async def optimize_prompt(self, user_prompt: str, api_key: str) -> str:
+         """Optimizes the user's initial prompt."""
          system_prompt = (
+             "You are a prompt optimization expert. Improve the given user prompt. "
+             "Be clear, specific, and complete. Maintain the user's original intent. "
+             "Return ONLY the revised prompt."
          )
+         full_prompt = f"{system_prompt}\n\nUser's initial prompt:\n{user_prompt}"
+         optimized = await call_model(full_prompt, model="gpt-4o", api_key=api_key)
          return optimized

  class OrchestratorAgent:
+     def __init__(self, log_queue: queue.Queue, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> None:
          self.log_queue = log_queue
+         self.human_in_the_loop_event = human_in_the_loop_event
+         self.human_input_queue = human_input_queue

+     async def generate_plan(self, task: str, api_key: str, human_feedback: Optional[str] = None) -> str:
          """
+         Generates a plan, potentially requesting human feedback.
          """
+         if human_feedback:  # Use human feedback if provided
+             prompt = (
+                 f"You are a master planner. You previously generated a partial plan for the task: '{task}'.\n"
+                 "You requested human feedback, and here's the feedback you received:\n"
+                 f"{human_feedback}\n\n"
+                 "Now, complete or revise the plan, incorporating the human feedback. "
+                 "Output the plan as a numbered list."
+             )
+             plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
+             return plan
+
          prompt = (
+             f"You are a master planner. Given the task: '{task}', create a detailed, step-by-step plan. "
+             "Break down the task into sub-tasks. Assign each sub-task to agents: Coder, Code Reviewer, Quality Assurance Tester, and Documentation Agent. "
+             "Include steps for review and revision. Consider potential issues and error handling. "
+             "Include instructions for documentation.\n\n"
+             "HOWEVER, if at ANY point you are unsure how to proceed, you can request human feedback. "
+             "To do this, output ONLY the following phrase (and nothing else): 'REQUEST_HUMAN_FEEDBACK'\n"
+             "Followed by a newline and a clear and concise question for the human. Example:\n\nREQUEST_HUMAN_FEEDBACK\nShould the output be in JSON or XML format?"
+             "\n\nOutput the plan as a numbered list (or as much as you can before requesting feedback)."
          )
+         plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
+
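+         # If the model used the REQUEST_HUMAN_FEEDBACK sentinel, extract its question,
+         # signal the UI thread via the shared Event, block on the queue for the reply,
+         # then re-plan with that feedback.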
+         if "REQUEST_HUMAN_FEEDBACK" in plan:
+             self.log_queue.put("[Orchestrator]: Requesting human feedback...")
+             question = plan.split("REQUEST_HUMAN_FEEDBACK", 1)[1].strip()
+             self.log_queue.put(f"[Orchestrator]: Question for human: {question}")
+             self.human_in_the_loop_event.set()  # Signal the human input thread
+             human_response = self.human_input_queue.get()  # Wait for human input
+             self.human_in_the_loop_event.clear()  # Reset the event
+             self.log_queue.put(f"[Orchestrator]: Received human feedback: {human_response}")
+             return await self.generate_plan(task, api_key, human_response)  # Recursive call with feedback

          return plan

  class CoderAgent:
+     async def generate_code(self, instructions: str, api_key: str, model: str = "gpt-4o") -> str:
+         """Generates code based on instructions."""
          prompt = (
+             "You are a highly skilled coding agent. Output ONLY the code. "
+             "Adhere to best practices. Include error handling.\n\n"
+             f"Instructions:\n{instructions}"
          )
+         code = await call_model(prompt, model=model, api_key=api_key)
          return code

  class CodeReviewerAgent:
      async def review_code(self, code: str, task: str, api_key: str) -> str:
+         """Reviews code. Provides concise, actionable feedback or 'APPROVE'."""
          prompt = (
+             "You are a meticulous code reviewer. Provide CONCISE feedback. "
+             "Focus on correctness, efficiency, readability, error handling, security, and adherence to the task. "
+             "Suggest improvements. If acceptable, respond with ONLY 'APPROVE'. "
+             "Do NOT generate code.\n\n"
+             f"Task: {task}\n\nCode:\n{code}"
          )
+         review = await call_model(prompt, model="gpt-4o", api_key=api_key)
          return review

+ class QualityAssuranceTesterAgent:
+     async def generate_test_cases(self, code: str, task: str, api_key: str) -> str:
+         """Generates test cases."""
+         prompt = (
+             "You are a quality assurance testing agent. Generate test cases. "
+             "Consider edge cases and error scenarios. Output in a clear format.\n\n"
+             f"Task: {task}\n\nCode:\n{code}"
+         )
+         test_cases = await call_model(prompt, model="gpt-4o", api_key=api_key)
+         return test_cases
+
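+     # Note: the "tests" below are not executed locally; the model itself is asked
+     # to reason about the test cases and report the results as text.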
+     async def run_tests(self, code: str, test_cases: str, api_key: str) -> str:
+         """Runs tests and reports results."""
+         prompt = (
+             "Run the generated test cases. Compare actual vs expected output. "
+             "State discrepancies. If all pass, output 'TESTS PASSED'.\n\n"
+             f"Code:\n{code}\n\nTest Cases:\n{test_cases}"
+         )
+         test_results = await call_model(prompt, model="gpt-4o", api_key=api_key)
+         return test_results
+
  class DocumentationAgent:
      async def generate_documentation(self, code: str, api_key: str) -> str:
+         """Generates documentation, including a --help message."""
          prompt = (
+             "Generate clear and concise documentation. "
+             "Include a brief description, explanation, and a --help message.\n\n"
+             f"Code:\n{code}"
          )
+         documentation = await call_model(prompt, model="gpt-4o", api_key=api_key)
          return documentation

  # -------------------- Multi-Agent Conversation --------------------
+ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> None:
      """
+     Conducts the multi-agent conversation.
      """
+     conversation: List[Dict[str, str]] = []

+     # Step 0: Optimize Prompt
+     log_queue.put("[Prompt Optimizer]: Optimizing prompt...")
      prompt_optimizer = PromptOptimizerAgent()
      optimized_task = await prompt_optimizer.optimize_prompt(task_message, api_key=api_key)
      conversation.append({"agent": "Prompt Optimizer", "message": f"Optimized Task:\n{optimized_task}"})
      log_queue.put(f"[Prompt Optimizer]: Optimized task prompt:\n{optimized_task}")

+     # Step 1: Generate Plan
+     log_queue.put("[Orchestrator]: Generating plan...")
+     orchestrator = OrchestratorAgent(log_queue, human_in_the_loop_event, human_input_queue)
      plan = await orchestrator.generate_plan(optimized_task, api_key=api_key)
      conversation.append({"agent": "Orchestrator", "message": f"Plan:\n{plan}"})
      log_queue.put(f"[Orchestrator]: Plan generated:\n{plan}")

+     # Step 2: Generate Code
      coder = CoderAgent()
+     coder_instructions = f"Implement the task:\n{plan}"
+     log_queue.put("[Coder]: Generating code...")
      code = await coder.generate_code(coder_instructions, api_key=api_key)
      conversation.append({"agent": "Coder", "message": f"Code:\n{code}"})
      log_queue.put(f"[Coder]: Code generated:\n{code}")

+     # Step 3: Code Review and Revision
      reviewer = CodeReviewerAgent()
+     tester = QualityAssuranceTesterAgent()
      approval_keyword = "approve"
      revision_iteration = 0
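+     # Loop: review the code; break on approval, otherwise generate and "run" QA
+     # tests, feed everything back to the coder, and stop after 5 failed rounds.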
      while True:
+         log_queue.put(f"[Code Reviewer]: Reviewing code (Iteration {revision_iteration})...")
          review = await reviewer.review_code(code, optimized_task, api_key=api_key)
          conversation.append({"agent": "Code Reviewer", "message": f"Review (Iteration {revision_iteration}):\n{review}"})
+         log_queue.put(f"[Code Reviewer]: Review (Iteration {revision_iteration}):\n{review}")

          if approval_keyword in review.lower():
              log_queue.put("[Code Reviewer]: Code approved.")
+             break

          revision_iteration += 1
          if revision_iteration >= 5:
+             log_queue.put("Unable to solve task satisfactorily.")
+             sys.exit("Unable to solve task satisfactorily.")
+
+         log_queue.put("[QA Tester]: Generating test cases...")
+         test_cases = await tester.generate_test_cases(code, optimized_task, api_key=api_key)
+         conversation.append({"agent": "QA Tester", "message": f"Test Cases:\n{test_cases}"})
+         log_queue.put(f"[QA Tester]: Test Cases:\n{test_cases}")

+         log_queue.put("[QA Tester]: Running tests...")
+         test_results = await tester.run_tests(code, test_cases, api_key)
+         conversation.append({"agent": "QA Tester", "message": f"Test Results:\n{test_results}"})
+         log_queue.put(f"[QA Tester]: Test Results:\n{test_results}")
+
+         log_queue.put(f"[Orchestrator]: Revising code (Iteration {revision_iteration})...")
+         update_instructions = f"Revise:\nReview:\n{review}\nTests:\n{test_results}\nPlan:\n{plan}"
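+         # Note: this revision pass calls "gpt-3.5-turbo-16k", unlike the other
+         # agents, which all use "gpt-4o".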
+         revised_code = await coder.generate_code(update_instructions, api_key=api_key, model="gpt-3.5-turbo-16k")
          conversation.append({"agent": "Coder", "message": f"Revised Code (Iteration {revision_iteration}):\n{revised_code}"})
+         log_queue.put(f"[Coder]: Revised (Iteration {revision_iteration}):\n{revised_code}")
+         code = revised_code

+     # Step 4: Generate Documentation
      doc_agent = DocumentationAgent()
+     log_queue.put("[Documentation Agent]: Generating documentation...")
      documentation = await doc_agent.generate_documentation(code, api_key=api_key)
      conversation.append({"agent": "Documentation Agent", "message": f"Documentation:\n{documentation}"})
      log_queue.put(f"[Documentation Agent]: Documentation generated:\n{documentation}")

+     log_queue.put("Conversation complete.")
      log_queue.put(("result", conversation))

+ # -------------------- Process Generator and Human Input --------------------
+ def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> Generator[str, None, None]:
      """
+     Wraps the conversation and yields log messages. Handles human input.
      """
      log_q: queue.Queue = queue.Queue()

      def run_conversation() -> None:
+         asyncio.run(multi_agent_conversation(task_message, log_q, api_key, human_in_the_loop_event, human_input_queue))

      thread = threading.Thread(target=run_conversation)
      thread.start()

      final_result = None
      while thread.is_alive() or not log_q.empty():
          try:
              msg = log_q.get(timeout=0.1)
              if isinstance(msg, tuple) and msg[0] == "result":
                  final_result = msg[1]
+                 yield "Conversation complete."
              else:
                  yield msg
          except queue.Empty:

      thread.join()
      if final_result:
+         conv_text = "\n=== Conversation ===\n"
          for entry in final_result:
              conv_text += f"[{entry['agent']}]: {entry['message']}\n\n"
          yield conv_text

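+ # Builds a small Blocks UI whose Submit button pushes the entered text onto a
+ # Queue; the caller reads from that queue and forwards the reply to the orchestrator.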
+ def get_human_feedback(placeholder_text):
+     """Gets human input using a Gradio Textbox."""
+     with gr.Blocks() as human_feedback_interface:
+         with gr.Row():
+             human_input = gr.Textbox(lines=4, placeholder=placeholder_text, label="Human Feedback")
+         with gr.Row():
+             submit_button = gr.Button("Submit Feedback")
+
+         feedback_queue = queue.Queue()
+
+         def submit_feedback(input_text):
+             feedback_queue.put(input_text)
+             return ""
+
+         submit_button.click(submit_feedback, inputs=human_input, outputs=human_input)
+         human_feedback_interface.load(None, [], [])  # This is needed to keep the interface alive
+
+     return human_feedback_interface, feedback_queue

  # -------------------- Chat Function for Gradio --------------------
  def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = None) -> Generator[str, None, None]:
+     """Chat function for Gradio."""
      if not openai_api_key:
          openai_api_key = os.getenv("OPENAI_API_KEY")
+     if not openai_api_key:
+         yield "Error: API key not provided."
+         return
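+     # Event + Queue shared with the OrchestratorAgent: the event signals that the
+     # orchestrator is blocked waiting for a human reply; the queue carries the reply.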
+     human_in_the_loop_event = threading.Event()
+     human_input_queue = queue.Queue()
+
+     yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
+
+     while human_in_the_loop_event.is_set():
+         yield "Waiting for human feedback..."
+         placeholder = "Please provide your feedback."
+         human_interface, feedback_queue = get_human_feedback(placeholder)
+         # This is hacky, but it is currently the only way to make this work with Gradio
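+         # NOTE: gr.Textbox.update(...) assumes the Gradio 3.x update API; in Gradio 4+
+         # the equivalent is gr.update(...) or returning a new component instance.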
+         yield gr.Textbox.update(visible=False), gr.update(visible=True)
+         try:
+             human_feedback = feedback_queue.get(timeout=300)  # Wait for up to 5 minutes
+             human_input_queue.put(human_feedback)
+             human_in_the_loop_event.clear()
+             yield gr.Textbox.update(visible=True), human_interface.close()
+             yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
+
+         except queue.Empty:
+             human_input_queue.put("No feedback provided.")  # Timeout
+             human_in_the_loop_event.clear()
+             yield gr.Textbox.update(visible=True), human_interface.close()
+             yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)

  # -------------------- Launch the Chatbot --------------------
+
+ # Create the main chat interface
  iface = gr.ChatInterface(
      fn=multi_agent_chat,
      additional_inputs=[gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")],
+     title="Multi-Agent Task Solver with Human-in-the-Loop",
      description="""
+     - Collaborative workflow with Human-in-the-Loop capability.
+     - The Orchestrator can ask for human feedback if needed.
+     - Enter a task, and the agents will work on it. You may be prompted for input.
+     - Max 5 revision iterations.
+     - Provide your OpenAI API Key below.
      """
  )

+ # Need a dummy interface to make the human feedback interface update
+ dummy_iface = gr.Interface(lambda x: x, "textbox", "textbox")
+
  if __name__ == "__main__":
+     demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
+     demo.launch()
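
For reference, a minimal, self-contained sketch (not part of app.py; the worker and feedback_q names are illustrative only) of the threading.Event / queue.Queue handshake that the new OrchestratorAgent and multi_agent_chat are built around: the worker thread signals that it needs input and blocks on a queue, while the main thread notices the signal and supplies the reply.

import queue
import threading

def worker(feedback_needed: threading.Event, feedback_q: queue.Queue) -> None:
    # Stand-in for the orchestrator deciding it needs human input.
    print("[worker] requesting feedback")
    feedback_needed.set()        # signal the UI/main thread
    answer = feedback_q.get()    # block until a reply arrives
    feedback_needed.clear()
    print(f"[worker] got feedback: {answer}")

if __name__ == "__main__":
    feedback_needed = threading.Event()
    feedback_q: queue.Queue = queue.Queue()
    t = threading.Thread(target=worker, args=(feedback_needed, feedback_q))
    t.start()
    feedback_needed.wait()                # main thread notices the request
    feedback_q.put("Use JSON output")     # stands in for the Gradio textbox submit
    t.join()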