Wendong-Fan committed
Commit 26aa535 · Parent: 92ee0c5

fix async bug, optimize log info, tool call error handling

README.md CHANGED
@@ -179,7 +179,7 @@ source .venv/bin/activate
 .venv\Scripts\activate
 
 # Install from requirements.txt
-pip install -r requirements.txt
+pip install -r requirements.txt --use-pep517
 ```
 
 ## Option 3: Using conda
@@ -201,7 +201,7 @@ conda activate owl
 pip install -e .
 
 # Option 2: Install from requirements.txt
-pip install -r requirements.txt
+pip install -r requirements.txt --use-pep517
 
 # Exit the conda environment when done
 conda deactivate
README_zh.md CHANGED
@@ -176,7 +176,7 @@ source .venv/bin/activate
 .venv\Scripts\activate
 
 # 从 requirements.txt 安装
-pip install -r requirements.txt
+pip install -r requirements.txt --use-pep517
 ```
 
 ## 选项3:使用 conda
@@ -198,7 +198,7 @@ conda activate owl
 pip install -e .
 
 # 选项2:从 requirements.txt 安装
-pip install -r requirements.txt
+pip install -r requirements.txt --use-pep517
 
 # 完成后退出 conda 环境
 conda deactivate
owl/run_azure_openai.py CHANGED
@@ -47,7 +47,7 @@ def construct_society(question: str) -> OwlRolePlaying:
     # Create models for different components using Azure OpenAI
     base_model_config = {
         "model_platform": ModelPlatformType.AZURE,
-        "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
+        "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
         "model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
     }
 
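For context, the config dict touched above is what the script feeds into camel-ai's model factory. A minimal sketch of that wiring, assuming camel-ai 0.2.29 and that the Azure credentials the Azure backend expects (API key, endpoint, API version) are already exported in the environment; the deployment name below is a placeholder:

```python
import os

from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Placeholder deployment name for illustration only; in the script this comes
# from the AZURE_OPENAI_MODEL_TYPE environment variable.
os.environ.setdefault("AZURE_OPENAI_MODEL_TYPE", "gpt-4o")

base_model_config = {
    "model_platform": ModelPlatformType.AZURE,
    "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
    "model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
}

# The dict's keys mirror ModelFactory.create's keyword arguments.
model = ModelFactory.create(**base_model_config)
```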
owl/utils/enhanced_role_playing.py CHANGED
@@ -437,7 +437,61 @@ class OwlGAIARolePlaying(OwlRolePlaying):
         )
 
 
-async def run_society(
+def run_society(
+    society: OwlRolePlaying,
+    round_limit: int = 15,
+) -> Tuple[str, List[dict], dict]:
+    overall_completion_token_count = 0
+    overall_prompt_token_count = 0
+
+    chat_history = []
+    init_prompt = """
+Now please give me instructions to solve over overall task step by step. If the task requires some specific knowledge, please instruct me to use tools to complete the task.
+"""
+    input_msg = society.init_chat(init_prompt)
+    for _round in range(round_limit):
+        assistant_response, user_response = society.step(input_msg)
+        overall_completion_token_count += (
+            assistant_response.info["usage"]["completion_tokens"]
+            + user_response.info["usage"]["completion_tokens"]
+        )
+
+        # convert tool call to dict
+        tool_call_records: List[dict] = []
+        for tool_call in assistant_response.info["tool_calls"]:
+            tool_call_records.append(tool_call.as_dict())
+
+        _data = {
+            "user": user_response.msg.content,
+            "assistant": assistant_response.msg.content,
+            "tool_calls": tool_call_records,
+        }
+
+        chat_history.append(_data)
+        logger.info(f"Round #{_round} user_response:\n {user_response.msgs[0].content}")
+        logger.info(
+            f"Round #{_round} assistant_response:\n {assistant_response.msgs[0].content}"
+        )
+
+        if (
+            assistant_response.terminated
+            or user_response.terminated
+            or "TASK_DONE" in user_response.msg.content
+        ):
+            break
+
+        input_msg = assistant_response.msg
+
+    answer = chat_history[-1]["assistant"]
+    token_info = {
+        "completion_token_count": overall_completion_token_count,
+        "prompt_token_count": overall_prompt_token_count,
+    }
+
+    return answer, chat_history, token_info
+
+
+async def arun_society(
     society: OwlRolePlaying,
     round_limit: int = 15,
 ) -> Tuple[str, List[dict], dict]:
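The net effect of this hunk: the old coroutine `run_society` is renamed to `arun_society`, and a new synchronous `run_society` with the same signature drives the role-playing loop directly, which is the async fix the commit message refers to. A minimal usage sketch, assuming both functions are importable from `owl.utils.enhanced_role_playing` and with `build_society` as a hypothetical stand-in for however the caller constructs an `OwlRolePlaying` instance (the run_* scripts use a `construct_society` helper):

```python
import asyncio

from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society, run_society


def build_society(question: str) -> OwlRolePlaying:
    # Hypothetical placeholder: build and return an OwlRolePlaying society
    # for `question`, e.g. via a construct_society() helper as in run_azure_openai.py.
    ...


society = build_society("Summarize the latest camel-ai release notes.")

# Synchronous path: the new run_society blocks until TASK_DONE or round_limit.
answer, chat_history, token_info = run_society(society, round_limit=15)

# Asynchronous path: the previous behaviour lives on as arun_society.
answer, chat_history, token_info = asyncio.run(arun_society(society, round_limit=15))
```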
pyproject.toml CHANGED
@@ -21,7 +21,7 @@ keywords = [
     "learning-systems"
 ]
 dependencies = [
-    "camel-ai[all]==0.2.28",
+    "camel-ai[all]==0.2.29",
     "chunkr-ai>=0.0.41",
     "docx2markdown>=0.1.1",
     "gradio>=3.50.2",
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-camel-ai[all]==0.2.28
+camel-ai[all]==0.2.29
 chunkr-ai>=0.0.41
 docx2markdown>=0.1.1
 gradio>=3.50.2
run_app.py CHANGED
@@ -49,7 +49,9 @@ def main():
         print(
             f"Error: Unable to import necessary modules. Please ensure all dependencies are installed: {e}"
         )
-        print("Tip: Run 'pip install -r requirements.txt' to install all dependencies")
+        print(
+            "Tip: Run 'pip install -r requirements.txt --use-pep517' to install all dependencies"
+        )
         sys.exit(1)
     except Exception as e:
         print(f"Error occurred while starting the application: {e}")
run_app_zh.py CHANGED
@@ -47,7 +47,9 @@ def main():
 
     except ImportError as e:
         print(f"错误: 无法导入必要的模块。请确保已安装所有依赖项: {e}")
-        print("提示: 运行 'pip install -r requirements.txt' 安装所有依赖项")
+        print(
+            "提示: 运行 'pip install -r requirements.txt --use-pep517' 安装所有依赖项"
+        )
         sys.exit(1)
     except Exception as e:
         print(f"启动应用程序时出错: {e}")
uv.lock CHANGED
@@ -482,7 +482,7 @@ wheels = [
 
 [[package]]
 name = "camel-ai"
-version = "0.2.28"
+version = "0.2.29"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "colorama" },
@@ -499,9 +499,9 @@ dependencies = [
     { name = "pyyaml" },
     { name = "tiktoken" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/6a/3b/7f350ae3c5bf42263688d3a69333e3908af4d45ce8f5f838af634a2720b3/camel_ai-0.2.28.tar.gz", hash = "sha256:f47e12bdf59df6e789db4587f0c5bd0adf43b2029d6be1bfcc31bfd41cab9d9f", size = 443082 }
+sdist = { url = "https://files.pythonhosted.org/packages/00/f8/fdb2478ec3b61f78af2a8a8ab0b575e795a015e89c2c058cee61d63a3951/camel_ai-0.2.29.tar.gz", hash = "sha256:b077885ea7a1fd6b4d53dd77e83b6b4c2ded96e43ced6a2f4bd51a434a29bbdb", size = 440795 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/5d/27/8a6e97f660354ce03413872268c7f4a40ceefdf39b20f161cb7f672dc67c/camel_ai-0.2.28-py3-none-any.whl", hash = "sha256:079e7e905a36b64be47a6a27ad4b99d21ca0403b27027a4d777744968a22040a", size = 748237 },
+    { url = "https://files.pythonhosted.org/packages/2b/c4/4c0c388464d4c8f8ec7704d39459883e0769268b566a82245f545b09f703/camel_ai-0.2.29-py3-none-any.whl", hash = "sha256:812143a204e364703be40066101c0cf34769bc589dac81373444acc6bab8fe7b", size = 746424 },
 ]
 
 [package.optional-dependencies]
@@ -3622,7 +3622,7 @@ dependencies = [
 
 [package.metadata]
 requires-dist = [
-    { name = "camel-ai", extras = ["all"], specifier = "==0.2.28" },
+    { name = "camel-ai", extras = ["all"], specifier = "==0.2.29" },
     { name = "chunkr-ai", specifier = ">=0.0.41" },
     { name = "docx2markdown", specifier = ">=0.1.1" },
     { name = "gradio", specifier = ">=3.50.2" },