DonForDon committed
Commit 8181b73 · 1 Parent(s): 72bb538

Add Ollama API

Files changed (5):
  1. README.md +3 -0
  2. README_zh.md +3 -0
  3. owl/.env_template +5 -2
  4. owl/app.py +9 -0
  5. owl/run_ollama.py +137 -0
README.md CHANGED
@@ -246,6 +246,9 @@ python owl/run_deepseek.py
 
 # Run with other OpenAI-compatible models
 python owl/run_openai_compatiable_model.py
+
+# Run with Ollama API
+python owl/run_ollama.py
 ```
 
 For a simpler version that only requires an LLM API key, you can try our minimal example:
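Before running the new script, it can help to confirm that Ollama's OpenAI-compatible endpoint is reachable and that the model used by `run_ollama.py` has been pulled. A minimal pre-flight check (not part of this commit), assuming the `openai` Python package (v1+) is installed and Ollama is serving on its default port:

```python
# Illustrative pre-flight check: call Ollama's OpenAI-compatible API directly.
# Assumes `ollama pull qwen2.5:3b` has been run and the server listens on port 11434.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")  # key is ignored locally

resp = client.chat.completions.create(
    model="qwen2.5:3b",
    messages=[{"role": "user", "content": "Reply with OK"}],
    max_tokens=5,
)
print(resp.choices[0].message.content)
```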
README_zh.md CHANGED
@@ -246,6 +246,9 @@ python owl/run_deepseek.py
 
 # 使用其他 OpenAI 兼容模型运行
 python owl/run_openai_compatiable_model.py
+
+# 使用 Ollama API 运行
+python owl/run_ollama.py
 ```
 
 你可以通过修改 `run.py` 脚本来运行自己的任务:
owl/.env_template CHANGED
@@ -1,8 +1,11 @@
 # MODEL & API (See https://github.com/camel-ai/camel/blob/master/camel/types/enums.py)
 
+# Ollama API
+OLLAMA_API_KEY=""
+
 # OPENAI API
-OPENAI_API_KEY = ""
-# OPENAI_API_BASE_URL = ""
+OPENAI_API_KEY=""
+# OPENAI_API_BASE_URL=""
 
 # Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
 # QWEN_API_KEY=""
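For context, the scripts read these values with python-dotenv, as `run_ollama.py` below does. A minimal sketch of how the new key is consumed (the fallback placeholder is an assumption, since a local Ollama server does not validate API keys):

```python
import os
from dotenv import load_dotenv

load_dotenv()  # loads owl/.env, typically copied from owl/.env_template

# Local Ollama ignores the key, so an empty value or any placeholder works.
ollama_api_key = os.getenv("OLLAMA_API_KEY") or "ollama"
```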
owl/app.py CHANGED
@@ -40,6 +40,7 @@ SCRIPTS = {
     "Default": "run.py",
     "GAIA Roleplaying": "run_gaia_roleplaying.py",
     "OpenAI Compatible": "run_openai_compatiable_model.py",
+    "Ollama": "run_ollama.py",
 }
 
 # 脚本描述
@@ -51,6 +52,7 @@ SCRIPT_DESCRIPTIONS = {
     "Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具",
     "GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
     "OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点",
+    "Ollama": "使用本地Ollama API运行的开源模型",
 }
 
 # 环境变量分组
@@ -84,6 +86,13 @@ ENV_GROUPS = {
             "required": False,
             "help": "DeepSeek API密钥,用于访问DeepSeek模型。获取方式:https://platform.deepseek.com/api_keys",
         },
+        {
+            "name": "OLLAMA_API_KEY",
+            "label": "Ollama API密钥",
+            "type": "password",
+            "required": False,
+            "help": "Ollama API密钥。本地Ollama服务通常不校验密钥,可留空",
+        },
     ],
     "搜索工具": [
         {
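The diff only registers the new entry in `SCRIPTS`, `SCRIPT_DESCRIPTIONS`, and `ENV_GROUPS`; how `app.py` actually starts it is not shown here. A hypothetical sketch of the dispatch, assuming the app launches the selected script as a subprocess with the environment values it collected (`launch` is an invented helper, not code from the repo):

```python
import os
import subprocess
import sys

SCRIPTS = {"Ollama": "run_ollama.py"}  # excerpt of the mapping added above

def launch(name: str, extra_env: dict) -> subprocess.Popen:
    # Run the selected script with the environment values gathered from ENV_GROUPS.
    env = {**os.environ, **extra_env}
    return subprocess.Popen([sys.executable, SCRIPTS[name]], env=env)

proc = launch("Ollama", {"OLLAMA_API_KEY": ""})
```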
owl/run_ollama.py ADDED
@@ -0,0 +1,137 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# run_ollama.py by tj-scripts (https://github.com/tj-scripts)
+import os
+
+from dotenv import load_dotenv
+from camel.models import ModelFactory
+from camel.toolkits import (
+    CodeExecutionToolkit,
+    ExcelToolkit,
+    ImageAnalysisToolkit,
+    SearchToolkit,
+    WebToolkit,
+)
+from camel.types import ModelPlatformType
+from camel.logger import set_log_level
+
+from utils import OwlRolePlaying, run_society
+
+set_log_level(level="DEBUG")
+
+load_dotenv()
+
+
+def construct_society(question: str) -> OwlRolePlaying:
+    r"""Construct a society of agents based on the given question.
+
+    Args:
+        question (str): The task or question to be addressed by the society.
+
+    Returns:
+        OwlRolePlaying: A configured society of agents ready to address the question.
+    """
+
+    # Create models for different components, all served by the local Ollama
+    # instance through its OpenAI-compatible endpoint.
+    models = {
+        "user": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
+            model_type="qwen2.5:3b",
+            api_key=os.getenv("OLLAMA_API_KEY"),
+            url="http://localhost:11434/v1",
+            model_config_dict={"temperature": 0.8, "max_tokens": 4096},
+        ),
+        "assistant": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
+            model_type="qwen2.5:3b",
+            api_key=os.getenv("OLLAMA_API_KEY"),
+            url="http://localhost:11434/v1",
+            model_config_dict={"temperature": 0.2, "max_tokens": 4096},
+        ),
+        "web": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
+            model_type="llava:latest",
+            api_key=os.getenv("OLLAMA_API_KEY"),
+            url="http://localhost:11434/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 4096},
+        ),
+        "planning": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
+            model_type="qwen2.5:3b",
+            api_key=os.getenv("OLLAMA_API_KEY"),
+            url="http://localhost:11434/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 4096},
+        ),
+        "image": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
+            model_type="llava:latest",
+            api_key=os.getenv("OLLAMA_API_KEY"),
+            url="http://localhost:11434/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 4096},
+        ),
+    }
+
+    # Configure toolkits
+    tools = [
+        *WebToolkit(
+            headless=False,  # Set to True for headless mode (e.g., on remote servers)
+            web_agent_model=models["web"],
+            planning_agent_model=models["planning"],
+        ).get_tools(),
+        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
+        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
+        SearchToolkit().search_duckduckgo,
+        # SearchToolkit().search_google,  # Uncomment if Google search credentials are configured
+        SearchToolkit().search_wiki,
+        *ExcelToolkit().get_tools(),
+    ]
+
+    # Configure agent roles and parameters
+    user_agent_kwargs = {"model": models["user"]}
+    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
+
+    # Configure task parameters
+    task_kwargs = {
+        "task_prompt": question,
+        "with_task_specify": False,
+    }
+
+    # Create and return the society
+    society = OwlRolePlaying(
+        **task_kwargs,
+        user_role_name="user",
+        user_agent_kwargs=user_agent_kwargs,
+        assistant_role_name="assistant",
+        assistant_agent_kwargs=assistant_agent_kwargs,
+    )
+
+    return society
+
+
+def main():
+    r"""Main function to run the OWL system with an example question."""
+    # Example research question
+    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
+
+    # Construct and run the society
+    society = construct_society(question)
+    answer, chat_history, token_count = run_society(society)
+
+    # Output the result
+    print(f"\033[94mAnswer: {answer}\033[0m")
+
+
+if __name__ == "__main__":
+    main()
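A short usage sketch for the new script (an illustration, not part of the commit): it assumes you invoke it from the `owl/` directory so that `utils` and the `.env` file resolve, and that the `qwen2.5:3b` and `llava:latest` models have already been pulled into Ollama.

```python
# Run the Ollama-backed society with a custom task instead of the example in main().
from run_ollama import construct_society
from utils import run_society

society = construct_society("Open https://github.com/camel-ai/owl and summarize the README.")
answer, chat_history, token_count = run_society(society)
print(answer)
```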