Merge branch 'main' into branch_mk
This view is limited to 50 files because it contains too many changes.
- .container/.dockerignore +74 -0
- .container/DOCKER_README.md +298 -0
- .container/DOCKER_README_en.md +298 -0
- .container/Dockerfile +58 -0
- .container/build_docker.bat +186 -0
- .container/build_docker.sh +150 -0
- .container/check_docker.bat +88 -0
- .container/check_docker.sh +92 -0
- .container/docker-compose.yml +34 -0
- .container/run_in_docker.bat +181 -0
- .container/run_in_docker.sh +135 -0
- .pre-commit-config.yaml +29 -0
- README.md +387 -42
- README_zh.md +373 -30
- community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md +175 -0
- licenses/update_license.py +17 -23
- owl/.env_template +11 -3
- owl/app.py +921 -0
- owl/app_en.py +948 -0
- owl/camel/__init__.py +0 -25
- owl/camel/__pycache__/__init__.cpython-311.pyc +0 -0
- owl/camel/__pycache__/generators.cpython-311.pyc +0 -0
- owl/camel/__pycache__/human.cpython-311.pyc +0 -0
- owl/camel/__pycache__/logger.cpython-311.pyc +0 -0
- owl/camel/agents/__init__.py +0 -44
- owl/camel/agents/__pycache__/__init__.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/base.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/chat_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/critic_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/embodied_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/knowledge_graph_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/role_assignment_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/search_agent.cpython-311.pyc +0 -0
- owl/camel/agents/__pycache__/task_agent.cpython-311.pyc +0 -0
- owl/camel/agents/base.py +0 -29
- owl/camel/agents/chat_agent.py +0 -1423
- owl/camel/agents/critic_agent.py +0 -202
- owl/camel/agents/deductive_reasoner_agent.py +0 -303
- owl/camel/agents/embodied_agent.py +0 -201
- owl/camel/agents/knowledge_graph_agent.py +0 -259
- owl/camel/agents/role_assignment_agent.py +0 -141
- owl/camel/agents/search_agent.py +0 -133
- owl/camel/agents/task_agent.py +0 -410
- owl/camel/agents/tool_agents/__init__.py +0 -20
- owl/camel/agents/tool_agents/__pycache__/__init__.cpython-311.pyc +0 -0
- owl/camel/agents/tool_agents/__pycache__/base.cpython-311.pyc +0 -0
- owl/camel/agents/tool_agents/__pycache__/hugging_face_tool_agent.cpython-311.pyc +0 -0
- owl/camel/agents/tool_agents/base.py +0 -39
- owl/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -206
- owl/camel/benchmarks/__init__.py +0 -17
.container/.dockerignore
ADDED
@@ -0,0 +1,74 @@
```
# Git
.git
.gitignore
.github

# Docker
Dockerfile
docker-compose.yml
.dockerignore
DOCKER_README.md
run_in_docker.sh

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
.pytest_cache/
.coverage
htmlcov/

# Virtual environments
venv/
ENV/
env/
.env

# IDE
.idea/
.vscode/
*.swp
*.swo
.DS_Store

# Temporary files
temp_*
*.tmp
*.log
*.bak

# Caches
.cache/
.npm/
.yarn/

# Large data files
*.csv
*.sqlite
*.db
*.hdf5
*.h5
*.parquet
*.feather
*.pkl
*.pickle

# Data directories
data/
```
.container/DOCKER_README.md
ADDED
@@ -0,0 +1,298 @@
# OWL Project Docker Usage Guide

This document provides detailed instructions on how to run the OWL project using Docker.

## Prerequisites

- Install [Docker](https://docs.docker.com/get-docker/)
- Install [Docker Compose](https://docs.docker.com/compose/install/) (v2.x recommended)
- Obtain the necessary API keys (OpenAI API, etc.)

## Technical Notes

This Docker configuration uses the following technologies to ensure the OWL project runs smoothly in containers:

- **Xvfb**: a virtual framebuffer that simulates an X server in a headless environment
- **Playwright**: used for browser automation, configured in headless mode
- **Shared memory**: increased shared-memory size to improve browser performance
- **BuildKit**: uses Docker BuildKit to accelerate the build process
- **Cache optimization**: uses persistent volumes to cache pip and Playwright dependencies
- **Cross-platform compatibility**: provides scripts for both Windows and macOS/Linux

## Docker Compose Version Notes

The docker-compose.yml file used in this project is compatible with Docker Compose v2.x. If you are using the older Docker Compose v1.x, you may need to add the version number manually:

```yaml
version: '3'

services:
  # ...the rest of the configuration remains unchanged
```

## Quick Start

### 0. Check the Environment

First, run the check script to make sure your environment is ready:

#### Check on macOS/Linux

```bash
# First, make the script executable
chmod +x check_docker.sh

# Run the check script
./check_docker.sh
```

#### Check on Windows

```cmd
check_docker.bat
```

If the check script finds any issues, follow the prompts to fix them.

### 1. Configure Environment Variables

Copy the environment-variable template file and fill in the necessary API keys:

```bash
cp owl/.env_template owl/.env
```

Then edit the `owl/.env` file and fill in the necessary API keys, for example:

```
OPENAI_API_KEY=your_openai_api_key
GOOGLE_API_KEY=your_google_api_key
SEARCH_ENGINE_ID=your_search_engine_id
```

### 2. Quick Build of the Docker Image

#### Build on macOS/Linux

Use the provided shell script to speed up the Docker image build:

```bash
# First, make the script executable
chmod +x build_docker.sh

# Run the build script
./build_docker.sh
```

#### Build on Windows

Use the provided batch file:

```cmd
build_docker.bat
```

Or build and start using the standard method:

```cmd
REM Use BuildKit to accelerate the build
set DOCKER_BUILDKIT=1
set COMPOSE_DOCKER_CLI_BUILD=1
docker-compose build --build-arg BUILDKIT_INLINE_CACHE=1

REM Start the container
docker-compose up -d
```

### 3. Using the Container Interactively

After the container starts, it automatically enters an interactive shell and displays a welcome message with the list of available scripts:

```bash
# Enter the container (if you are not dropped in automatically)
docker-compose exec owl bash
```

Inside the container, you can run any available script directly:

```bash
# Run the default script
xvfb-python run.py

# Run the DeepSeek example
xvfb-python run_deepseek_example.py

# Run a script and pass it a query
xvfb-python run.py "What is artificial intelligence?"
```

### 4. Running Queries with the External Scripts

#### Run on macOS/Linux

```bash
# First, make the script executable
chmod +x run_in_docker.sh

# Uses the run.py script by default
./run_in_docker.sh "your question"

# Specify a particular script
./run_in_docker.sh run_deepseek_example.py "your question"
```

#### Run on Windows

```cmd
REM Uses the run.py script by default
run_in_docker.bat "your question"

REM Specify a particular script
run_in_docker.bat run_deepseek_example.py "your question"
```

**Available scripts**:

- `run.py` - default script, uses the OpenAI GPT-4o model
- `run_deepseek_example.py` - uses the DeepSeek model
- `run_gaia_roleplaying.py` - GAIA benchmark script

## Directory Mounts

The Docker Compose configuration sets up the following mount points:

- `./owl/.env:/app/owl/.env`: mounts the environment-variable file so API keys are easy to change
- `./data:/app/data`: mounts the data directory for storing and accessing data files
- `playwright-cache`: persistent volume for caching Playwright browsers
- `pip-cache`: persistent volume for caching pip packages

## Environment Variables

You can set environment variables in two ways:

1. Modify the `owl/.env` file
2. Add environment variables to the `environment` section of the `docker-compose.yml` file

## Build Optimization

This Docker configuration includes several build optimizations:

1. **Domestic mirror sources**: uses the Tsinghua University mirrors to accelerate pip package downloads
2. **Layer optimization**: reduces the number of layers in the Dockerfile to improve build efficiency
3. **Cache utilization**:
   - enables pip caching to avoid re-downloading dependencies
   - uses Docker BuildKit inline caching
   - orders Dockerfile instructions to maximize cache hits
4. **BuildKit**: enables Docker BuildKit to accelerate builds
5. **Persistent caching**:
   - a Docker volume caches pip packages (`pip-cache`)
   - a Docker volume caches Playwright browsers (`playwright-cache`)
   - a local cache directory (`.docker-cache`)

### Cache Cleanup

If you need to clean the caches, use the following commands:

```bash
# Clean the Docker build cache
docker builder prune

# Clean Docker volumes (deletes all unused volumes, including the cache volumes)
docker volume prune

# Clean the local cache directory
rm -rf .docker-cache
```

## Cross-Platform Compatibility

This project provides scripts for different operating systems:

1. **Check scripts**:
   - `check_docker.sh` (macOS/Linux): checks the Docker environment
   - `check_docker.bat` (Windows): checks the Docker environment

2. **Build scripts**:
   - `build_docker.sh` (macOS/Linux): builds the Docker image
   - `build_docker.bat` (Windows): builds the Docker image

3. **Run scripts**:
   - `run_in_docker.sh` (macOS/Linux): runs scripts in the Docker container
   - `run_in_docker.bat` (Windows): runs scripts in the Docker container

These scripts detect the operating system automatically and use the appropriate commands.

## Troubleshooting

### Container Fails to Start

Check the logs for more information:

```bash
docker-compose logs
```

### API Key Issues

Make sure all the necessary API keys are set correctly in the `owl/.env` file.

### Docker Compose Warnings

If you see a warning that the `version` attribute is obsolete:

```
WARN[0000] docker-compose.yml: the attribute `version` is obsolete
```

this is because you are using Docker Compose v2.x, which no longer requires an explicit version number. The attribute has been removed from the configuration file, so you should no longer see this warning.

### Browser-Related Issues

If you run into browser-related problems, try the following:

1. Make sure Python scripts in the container are run with the `xvfb-python` command
2. Check that Xvfb and its dependencies are installed correctly
3. Increase the shared-memory size (already set to 2GB in docker-compose.yml)

### Slow Builds

If builds are slow, try the following:

1. Make sure Docker BuildKit is enabled (`DOCKER_BUILDKIT=1`)
2. Make sure pip caching is enabled (configured in docker-compose.yml)
3. Build with `--build-arg BUILDKIT_INLINE_CACHE=1` (already configured in the build scripts)
4. A first build has to download all dependencies and may take a while; subsequent builds are faster

### Windows-Specific Issues

If you hit problems on Windows:

1. Run the Command Prompt or PowerShell with administrator privileges
2. For path issues, try forward slashes (/) instead of backslashes (\)
3. If the Docker Compose command fails, try `docker compose` (without the hyphen)

### Insufficient Memory

If you run out of memory, adjust the resource limits in the `docker-compose.yml` file:

```yaml
services:
  owl:
    # other configuration...
    deploy:
      resources:
        limits:
          cpus: '4'    # increase the CPU core count
          memory: 8G   # increase the memory limit
```

## Customizing the Docker Image

If you need to customize the Docker image, edit the `Dockerfile` and rebuild:

```bash
# macOS/Linux
./build_docker.sh

# Windows
build_docker.bat
```
.container/DOCKER_README_en.md
ADDED
@@ -0,0 +1,298 @@
# OWL Project Docker Usage Guide

This document provides detailed instructions on how to run the OWL project using Docker.

## Prerequisites

- Install [Docker](https://docs.docker.com/get-docker/)
- Install [Docker Compose](https://docs.docker.com/compose/install/) (v2.x recommended)
- Obtain the necessary API keys (OpenAI API, etc.)

## Technical Notes

This Docker configuration uses the following technologies to ensure the OWL project runs smoothly in containers:

- **Xvfb**: a virtual framebuffer that simulates an X server in a headless environment
- **Playwright**: used for browser automation, configured in headless mode
- **Shared memory**: increased shared-memory size to improve browser performance
- **BuildKit**: uses Docker BuildKit to accelerate the build process
- **Cache optimization**: uses persistent volumes to cache pip and Playwright dependencies
- **Cross-platform compatibility**: provides scripts for both Windows and macOS/Linux

## Docker Compose Version Notes

The docker-compose.yml file used in this project is compatible with Docker Compose v2.x. If you are using the older Docker Compose v1.x, you may need to add the version number manually:

```yaml
version: '3'

services:
  # ...the rest of the configuration remains unchanged
```

## Quick Start

### 0. Check the Environment

First, run the check script to make sure your environment is ready:

#### Check on macOS/Linux

```bash
# First, make the script executable
chmod +x check_docker.sh

# Run the check script
./check_docker.sh
```

#### Check on Windows

```cmd
check_docker.bat
```

If the check script finds any issues, follow the prompts to fix them.

### 1. Configure Environment Variables

Copy the environment-variable template file and fill in the necessary API keys:

```bash
cp owl/.env_template owl/.env
```

Then edit the `owl/.env` file and fill in the necessary API keys, for example:

```
OPENAI_API_KEY=your_openai_api_key
GOOGLE_API_KEY=your_google_api_key
SEARCH_ENGINE_ID=your_search_engine_id
```

### 2. Quick Build of the Docker Image

#### Build on macOS/Linux

Use the provided shell script to speed up the Docker image build:

```bash
# First, make the script executable
chmod +x build_docker.sh

# Run the build script
./build_docker.sh
```

#### Build on Windows

Use the provided batch file:

```cmd
build_docker.bat
```

Or build and start using the standard method:

```cmd
REM Use BuildKit to accelerate the build
set DOCKER_BUILDKIT=1
set COMPOSE_DOCKER_CLI_BUILD=1
docker-compose build --build-arg BUILDKIT_INLINE_CACHE=1

REM Start the container
docker-compose up -d
```

### 3. Using the Container Interactively

After the container starts, it automatically enters an interactive shell and displays a welcome message with the list of available scripts:

```bash
# Enter the container (if you are not dropped in automatically)
docker-compose exec owl bash
```

Inside the container, you can run any available script directly:

```bash
# Run the default script
xvfb-python run.py

# Run the DeepSeek example
xvfb-python run_deepseek_example.py

# Run a script and pass it a query
xvfb-python run.py "What is artificial intelligence?"
```

### 4. Running Queries with the External Scripts

#### Run on macOS/Linux

```bash
# First, make the script executable
chmod +x run_in_docker.sh

# Uses the run.py script by default
./run_in_docker.sh "your question"

# Specify a particular script
./run_in_docker.sh run_deepseek_example.py "your question"
```

#### Run on Windows

```cmd
REM Uses the run.py script by default
run_in_docker.bat "your question"

REM Specify a particular script
run_in_docker.bat run_deepseek_example.py "your question"
```

**Available scripts**:

- `run.py` - default script, uses the OpenAI GPT-4o model
- `run_deepseek_example.py` - uses the DeepSeek model
- `run_gaia_roleplaying.py` - GAIA benchmark script

## Directory Mounts

The Docker Compose configuration sets up the following mount points:

- `./owl/.env:/app/owl/.env`: mounts the environment-variable file so API keys are easy to change
- `./data:/app/data`: mounts the data directory for storing and accessing data files
- `playwright-cache`: persistent volume for caching Playwright browsers
- `pip-cache`: persistent volume for caching pip packages

## Environment Variables

You can set environment variables in two ways:

1. Modify the `owl/.env` file
2. Add environment variables to the `environment` section of the `docker-compose.yml` file

## Build Optimization

This Docker configuration includes several build optimizations:

1. **Domestic mirror sources**: uses the Tsinghua University mirrors to accelerate pip package downloads
2. **Layer optimization**: reduces the number of layers in the Dockerfile to improve build efficiency
3. **Cache utilization**:
   - enables pip caching to avoid re-downloading dependencies
   - uses Docker BuildKit inline caching
   - orders Dockerfile instructions to maximize cache hits
4. **BuildKit**: enables Docker BuildKit to accelerate builds
5. **Persistent caching**:
   - a Docker volume caches pip packages (`pip-cache`)
   - a Docker volume caches Playwright browsers (`playwright-cache`)
   - a local cache directory (`.docker-cache`)

### Cache Cleanup

If you need to clean the caches, use the following commands:

```bash
# Clean the Docker build cache
docker builder prune

# Clean Docker volumes (deletes all unused volumes, including the cache volumes)
docker volume prune

# Clean the local cache directory
rm -rf .docker-cache
```

## Cross-Platform Compatibility

This project provides scripts for different operating systems:

1. **Check scripts**:
   - `check_docker.sh` (macOS/Linux): checks the Docker environment
   - `check_docker.bat` (Windows): checks the Docker environment

2. **Build scripts**:
   - `build_docker.sh` (macOS/Linux): builds the Docker image
   - `build_docker.bat` (Windows): builds the Docker image

3. **Run scripts**:
   - `run_in_docker.sh` (macOS/Linux): runs scripts in the Docker container
   - `run_in_docker.bat` (Windows): runs scripts in the Docker container

These scripts detect the operating system automatically and use the appropriate commands.

## Troubleshooting

### Container Fails to Start

Check the logs for more information:

```bash
docker-compose logs
```

### API Key Issues

Make sure all the necessary API keys are set correctly in the `owl/.env` file.

### Docker Compose Warnings

If you see a warning that the `version` attribute is obsolete:

```
WARN[0000] docker-compose.yml: the attribute `version` is obsolete
```

this is because you are using Docker Compose v2.x, which no longer requires an explicit version number. The attribute has been removed from the configuration file, so you should no longer see this warning.

### Browser-Related Issues

If you run into browser-related problems, try the following:

1. Make sure Python scripts in the container are run with the `xvfb-python` command
2. Check that Xvfb and its dependencies are installed correctly
3. Increase the shared-memory size (already set to 2GB in docker-compose.yml)

### Slow Builds

If builds are slow, try the following:

1. Make sure Docker BuildKit is enabled (`DOCKER_BUILDKIT=1`)
2. Make sure pip caching is enabled (configured in docker-compose.yml)
3. Build with `--build-arg BUILDKIT_INLINE_CACHE=1` (already configured in the build scripts)
4. A first build has to download all dependencies and may take a while; subsequent builds are faster

### Windows-Specific Issues

If you hit problems on Windows:

1. Run the Command Prompt or PowerShell with administrator privileges
2. For path issues, try forward slashes (/) instead of backslashes (\)
3. If the Docker Compose command fails, try `docker compose` (without the hyphen)

### Insufficient Memory

If you run out of memory, adjust the resource limits in the `docker-compose.yml` file:

```yaml
services:
  owl:
    # other configuration...
    deploy:
      resources:
        limits:
          cpus: '4'    # increase the CPU core count
          memory: 8G   # increase the memory limit
```

## Customizing the Docker Image

If you need to customize the Docker image, edit the `Dockerfile` and rebuild:

```bash
# macOS/Linux
./build_docker.sh

# Windows
build_docker.bat
```
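Taken together, the quick-start steps above reduce to a short sequence on macOS/Linux. The following is a sketch, not part of this diff: it assumes you run it from the `.container/` directory added here, so it uses the `../owl` paths the scripts themselves use, whereas the README's `cp` command assumes the repository root.

```bash
# End-to-end quick start (sketch; paths assume the .container/ layout in this diff)
chmod +x check_docker.sh build_docker.sh run_in_docker.sh
./check_docker.sh                       # verify Docker, Compose, and the env file
cp ../owl/.env_template ../owl/.env     # then edit ../owl/.env and add your API keys
./build_docker.sh                       # BuildKit-accelerated image build
./run_in_docker.sh run.py "What is artificial intelligence?"
```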
.container/Dockerfile
ADDED
@@ -0,0 +1,58 @@
```dockerfile
FROM python:3.10-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=0 \
    PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple \
    PLAYWRIGHT_DOWNLOAD_HOST=https://npmmirror.com/mirrors/playwright \
    PLAYWRIGHT_BROWSERS_PATH=/root/.cache/ms-playwright \
    DEBIAN_FRONTEND=noninteractive

# Set the working directory
WORKDIR /app

# Install system dependencies (merged into a single RUN command to reduce layers)
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl git ffmpeg libsm6 libxext6 xvfb xauth x11-utils \
    gcc python3-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Copy project files
COPY owl/ ./owl/
COPY licenses/ ./licenses/
COPY assets/ ./assets/
COPY README.md .
COPY README_zh.md .
COPY pyproject.toml .

# Create a README.md to avoid build errors
RUN echo "# OWL Project\n\n这是OWL项目的Docker环境。" > README.md

# Install the uv tool
RUN pip install uv

# Create a virtual environment and install dependencies
RUN uv venv .venv --python=3.10 && \
    . .venv/bin/activate && \
    uv pip install -e .

# Create the startup wrapper
RUN echo '#!/bin/bash\nxvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"' > /usr/local/bin/xvfb-python && \
    chmod +x /usr/local/bin/xvfb-python

# Create the welcome script
RUN echo '#!/bin/bash\necho "欢迎使用OWL项目Docker环境!"\necho "Welcome to OWL Project Docker environment!"\necho ""\necho "可用的脚本 | Available scripts:"\nls -1 *.py | grep -v "__" | sed "s/^/- /"\necho ""\necho "运行示例 | Run examples:"\necho "  xvfb-python run.py                   # 运行默认脚本 | Run default script"\necho "  xvfb-python run_deepseek_example.py  # 运行DeepSeek示例 | Run DeepSeek example"\necho ""\necho "或者使用自定义查询 | Or use custom query:"\necho "  xvfb-python run.py \"你的问题 | Your question\""\necho ""' > /usr/local/bin/owl-welcome && \
    chmod +x /usr/local/bin/owl-welcome

# Set the working directory
WORKDIR /app/owl

# Add a health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import sys; sys.exit(0 if __import__('os').path.exists('/app/owl') else 1)"

# Container startup command
CMD ["/bin/bash", "-c", "owl-welcome && /bin/bash"]
```
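For reference, the `xvfb-python` wrapper generated by the RUN instruction above expands to the following short script (reconstructed from the echoed string; `xvfb-run` allocates a throwaway X display so Playwright can drive a browser without a monitor):

```bash
#!/bin/bash
# /usr/local/bin/xvfb-python, as written by the Dockerfile above:
# run python under an automatically numbered virtual X server
# with a 1280x960, 24-bit screen, forwarding all arguments.
xvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"
```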
.container/build_docker.bat
ADDED
@@ -0,0 +1,186 @@
```bat
@echo off
chcp 65001 >nul
setlocal enabledelayedexpansion

echo 在Windows上构建Docker镜像...
echo Building Docker image on Windows...

REM 设置配置变量
REM Set configuration variables
set CACHE_DIR=.docker-cache\pip
set BUILD_ARGS=--build-arg BUILDKIT_INLINE_CACHE=1
set COMPOSE_FILE=docker-compose.yml

REM 解析命令行参数
REM Parse command line arguments
set CLEAN_CACHE=0
set REBUILD=0
set SERVICE=

:parse_args
if "%~1"=="" goto :end_parse_args
if /i "%~1"=="--clean" (
    set CLEAN_CACHE=1
    shift
    goto :parse_args
)
if /i "%~1"=="--rebuild" (
    set REBUILD=1
    shift
    goto :parse_args
)
if /i "%~1"=="--service" (
    set SERVICE=%~2
    shift
    shift
    goto :parse_args
)
if /i "%~1"=="--help" (
    echo 用法: build_docker.bat [选项]
    echo Usage: build_docker.bat [options]
    echo 选项:
    echo Options:
    echo   --clean    清理缓存目录
    echo   --clean    Clean cache directory
    echo   --rebuild  强制重新构建镜像
    echo   --rebuild  Force rebuild image
    echo   --service  指定要构建的服务名称
    echo   --service  Specify service name to build
    echo   --help     显示此帮助信息
    echo   --help     Show this help message
    exit /b 0
)
shift
goto :parse_args
:end_parse_args

REM 检查Docker是否安装
REM Check if Docker is installed
where docker >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
    echo 错误: Docker未安装
    echo Error: Docker not installed
    echo 请先安装Docker Desktop
    echo Please install Docker Desktop first: https://docs.docker.com/desktop/install/windows-install/
    pause
    exit /b 1
)

REM 检查Docker是否运行
REM Check if Docker is running
docker info >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
    echo 错误: Docker未运行
    echo Error: Docker not running
    echo 请启动Docker Desktop应用程序
    echo Please start Docker Desktop application
    pause
    exit /b 1
)

REM 检查docker-compose.yml文件是否存在
REM Check if docker-compose.yml file exists
if not exist "%COMPOSE_FILE%" (
    echo 错误: 未找到%COMPOSE_FILE%文件
    echo Error: %COMPOSE_FILE% file not found
    echo 请确保在正确的目录中运行此脚本
    echo Please make sure you are running this script in the correct directory
    pause
    exit /b 1
)

REM 检查Docker Compose命令
REM Check Docker Compose command
where docker-compose >nul 2>nul
if %ERRORLEVEL% EQU 0 (
    set COMPOSE_CMD=docker-compose
) else (
    echo 尝试使用新的docker compose命令...
    echo Trying to use new docker compose command...
    docker compose version >nul 2>nul
    if not errorlevel 1 (
        set COMPOSE_CMD=docker compose
    ) else (
        echo 错误: 未找到Docker Compose命令
        echo Error: Docker Compose command not found
        echo 请确保Docker Desktop已正确安装
        echo Please make sure Docker Desktop is properly installed
        pause
        exit /b 1
    )
)

REM 设置Docker BuildKit环境变量
REM Set Docker BuildKit environment variables
set DOCKER_BUILDKIT=1
set COMPOSE_DOCKER_CLI_BUILD=1

echo 启用Docker BuildKit加速构建...
echo Enabling Docker BuildKit to accelerate build...

REM 清理缓存(如果指定)
REM Clean cache (if specified)
if %CLEAN_CACHE% EQU 1 (
    echo 清理缓存目录...
    echo Cleaning cache directory...
    if exist "%CACHE_DIR%" rmdir /s /q "%CACHE_DIR%"
)

REM 创建缓存目录
REM Create cache directory
if not exist "%CACHE_DIR%" (
    echo 创建缓存目录...
    echo Creating cache directory...
    mkdir "%CACHE_DIR%"
)

REM 添加构建时间标记
REM Add build time tag
for /f "tokens=2 delims==" %%a in ('wmic OS Get localdatetime /value') do set "dt=%%a"
set "YEAR=%dt:~0,4%"
set "MONTH=%dt:~4,2%"
set "DAY=%dt:~6,2%"
set "HOUR=%dt:~8,2%"
set "MINUTE=%dt:~10,2%"
set "BUILD_TIME=%YEAR%%MONTH%%DAY%_%HOUR%%MINUTE%"
set "BUILD_ARGS=%BUILD_ARGS% --build-arg BUILD_TIME=%BUILD_TIME%"

REM 构建Docker镜像
REM Build Docker image
echo 开始构建Docker镜像...
echo Starting to build Docker image...

if "%SERVICE%"=="" (
    if %REBUILD% EQU 1 (
        echo 强制重新构建所有服务...
        echo Force rebuilding all services...
        %COMPOSE_CMD% build --no-cache %BUILD_ARGS%
    ) else (
        %COMPOSE_CMD% build %BUILD_ARGS%
    )
) else (
    if %REBUILD% EQU 1 (
        echo 强制重新构建服务 %SERVICE%...
        echo Force rebuilding service %SERVICE%...
        %COMPOSE_CMD% build --no-cache %BUILD_ARGS% %SERVICE%
    ) else (
        echo 构建服务 %SERVICE%...
        echo Building service %SERVICE%...
        %COMPOSE_CMD% build %BUILD_ARGS% %SERVICE%
    )
)

if %ERRORLEVEL% EQU 0 (
    echo Docker镜像构建成功!
    echo Docker image build successful!
    echo 构建时间: %BUILD_TIME%
    echo Build time: %BUILD_TIME%
    echo 可以使用以下命令启动容器:
    echo You can use the following command to start the container:
    echo %COMPOSE_CMD% up -d
) else (
    echo Docker镜像构建失败,请检查错误信息。
    echo Docker image build failed, please check error messages.
)

pause
```
.container/build_docker.sh
ADDED
@@ -0,0 +1,150 @@
```bash
#!/bin/bash

# 设置配置变量 | Set configuration variables
CACHE_DIR=".docker-cache/pip"
BUILD_ARGS="--build-arg BUILDKIT_INLINE_CACHE=1"
COMPOSE_FILE="docker-compose.yml"
CLEAN_CACHE=0
REBUILD=0
SERVICE=""

# 解析命令行参数 | Parse command line arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --clean)
            CLEAN_CACHE=1
            shift
            ;;
        --rebuild)
            REBUILD=1
            shift
            ;;
        --service)
            SERVICE="$2"
            shift 2
            ;;
        --help)
            echo "用法 | Usage: ./build_docker.sh [选项 | options]"
            echo "选项 | Options:"
            echo "  --clean    清理缓存目录 | Clean cache directory"
            echo "  --rebuild  强制重新构建镜像 | Force rebuild image"
            echo "  --service  指定要构建的服务名称 | Specify service name to build"
            echo "  --help     显示此帮助信息 | Show this help message"
            exit 0
            ;;
        *)
            echo "未知选项 | Unknown option: $1"
            echo "使用 --help 查看帮助 | Use --help to see help"
            exit 1
            ;;
    esac
done

# 检测操作系统类型 | Detect operating system type
OS_TYPE=$(uname -s)
echo "检测到操作系统 | Detected OS: $OS_TYPE"

# 检查Docker是否安装 | Check if Docker is installed
if ! command -v docker &> /dev/null; then
    echo "错误 | Error: Docker未安装 | Docker not installed"
    echo "请先安装Docker | Please install Docker first: https://docs.docker.com/get-docker/"
    exit 1
fi

# 检查Docker是否运行 | Check if Docker is running
if ! docker info &> /dev/null; then
    echo "错误 | Error: Docker未运行 | Docker not running"
    echo "请启动Docker服务 | Please start Docker service"
    exit 1
fi

# 检查docker-compose.yml文件是否存在 | Check if docker-compose.yml file exists
if [ ! -f "$COMPOSE_FILE" ]; then
    echo "错误 | Error: 未找到$COMPOSE_FILE文件 | $COMPOSE_FILE file not found"
    echo "请确保在正确的目录中运行此脚本 | Please make sure you are running this script in the correct directory"
    exit 1
fi

# 设置Docker BuildKit环境变量 | Set Docker BuildKit environment variables
export DOCKER_BUILDKIT=1
export COMPOSE_DOCKER_CLI_BUILD=1

echo "启用Docker BuildKit加速构建... | Enabling Docker BuildKit to accelerate build..."

# 清理缓存(如果指定) | Clean cache (if specified)
if [ $CLEAN_CACHE -eq 1 ]; then
    echo "清理缓存目录... | Cleaning cache directory..."
    rm -rf "$CACHE_DIR"
fi

# 创建缓存目录 | Create cache directory
mkdir -p "$CACHE_DIR"

# 添加构建时间标记 | Add build time tag
BUILD_TIME=$(date +"%Y%m%d_%H%M%S")
BUILD_ARGS="$BUILD_ARGS --build-arg BUILD_TIME=$BUILD_TIME"

# 获取脚本所在目录 | Get script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# 获取项目根目录(脚本所在目录的父目录) | Get project root directory (parent directory of script directory)
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

echo "脚本目录 | Script directory: $SCRIPT_DIR"
echo "项目根目录 | Project root directory: $PROJECT_ROOT"

# 切换到项目根目录 | Change to project root directory
cd "$PROJECT_ROOT"

# 检查Docker Compose命令 | Check Docker Compose command
if command -v docker-compose &> /dev/null; then
    COMPOSE_CMD="docker-compose"
    echo "使用 docker-compose 命令 | Using docker-compose command"
elif docker compose version &> /dev/null; then
    COMPOSE_CMD="docker compose"
    echo "使用 docker compose 命令 | Using docker compose command"
else
    echo "错误 | Error: 未找到Docker Compose命令 | Docker Compose command not found"
    echo "请安装Docker Compose | Please install Docker Compose: https://docs.docker.com/compose/install/"
    exit 1
fi

# 检测CPU核心数,用于并行构建 | Detect CPU cores for parallel build
CPU_CORES=$(grep -c ^processor /proc/cpuinfo 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 2)
if [ $CPU_CORES -gt 2 ]; then
    PARALLEL_FLAG="--parallel"
    echo "检测到${CPU_CORES}个CPU核心,启用并行构建... | Detected ${CPU_CORES} CPU cores, enabling parallel build..."
else
    PARALLEL_FLAG=""
fi

# 构建命令基础部分 | Base part of build command
BUILD_CMD="$COMPOSE_CMD -f \"$SCRIPT_DIR/docker-compose.yml\" build $PARALLEL_FLAG --build-arg BUILDKIT_INLINE_CACHE=1"

# 根据操作系统类型执行不同的命令 | Execute different commands based on OS type
if [[ "$OS_TYPE" == "Darwin" ]]; then
    # macOS
    echo "在macOS上构建Docker镜像... | Building Docker image on macOS..."
    eval $BUILD_CMD
elif [[ "$OS_TYPE" == "Linux" ]]; then
    # Linux
    echo "在Linux上构建Docker镜像... | Building Docker image on Linux..."
    eval $BUILD_CMD
elif [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
    # Windows
    echo "在Windows上构建Docker镜像... | Building Docker image on Windows..."
    eval $BUILD_CMD
else
    echo "未知操作系统,尝试使用标准命令构建... | Unknown OS, trying to build with standard command..."
    eval $BUILD_CMD
fi

# 检查构建结果 | Check build result
if [ $? -eq 0 ]; then
    echo "Docker镜像构建成功! | Docker image build successful!"
    echo "构建时间 | Build time: $BUILD_TIME"
    echo "可以使用以下命令启动容器: | You can use the following command to start the container:"
    echo "$COMPOSE_CMD -f \"$SCRIPT_DIR/docker-compose.yml\" up -d"
else
    echo "Docker镜像构建失败,请检查错误信息。 | Docker image build failed, please check error messages."
    exit 1
fi
```
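As a usage example of the options the script parses above (an illustration, not part of the diff), a clean, from-scratch rebuild of just the `owl` service would be:

```bash
# Wipe the local pip cache dir, skip the Docker layer cache, build only 'owl'
./build_docker.sh --clean --rebuild --service owl
```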
.container/check_docker.bat
ADDED
@@ -0,0 +1,88 @@
```bat
@echo off
chcp 65001 >nul
echo 检查Docker环境...
echo Checking Docker environment...

REM 检查Docker是否安装
REM Check if Docker is installed
where docker >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
    echo 错误: Docker未安装
    echo Error: Docker not installed
    echo 在Windows上安装Docker的方法:
    echo How to install Docker on Windows:
    echo 1. 访问 https://docs.docker.com/desktop/install/windows-install/ 下载Docker Desktop
    echo 1. Visit https://docs.docker.com/desktop/install/windows-install/ to download Docker Desktop
    echo 2. 安装并启动Docker Desktop
    echo 2. Install and start Docker Desktop
    pause
    exit /b 1
)

echo Docker已安装
echo Docker is installed

REM 检查Docker Compose是否安装
REM Check if Docker Compose is installed
where docker-compose >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
    echo 警告: Docker-Compose未找到,尝试使用新的docker compose命令
    echo Warning: Docker-Compose not found, trying to use new docker compose command
    docker compose version >nul 2>nul
    if errorlevel 1 (
        echo 错误: Docker Compose未安装
        echo Error: Docker Compose not installed
        echo Docker Desktop for Windows应该已包含Docker Compose
        echo Docker Desktop for Windows should already include Docker Compose
        echo 请确保Docker Desktop已正确安装
        echo Please make sure Docker Desktop is properly installed
        pause
        exit /b 1
    ) else (
        echo 使用新的docker compose命令
        echo Using new docker compose command
        set COMPOSE_CMD=docker compose
    )
) else (
    echo Docker-Compose已安装
    echo Docker-Compose is installed
    set COMPOSE_CMD=docker-compose
)

REM 检查Docker是否正在运行
REM Check if Docker is running
docker info >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
    echo 错误: Docker未运行
    echo Error: Docker not running
    echo 请启动Docker Desktop应用程序
    echo Please start Docker Desktop application
    pause
    exit /b 1
)

echo Docker正在运行
echo Docker is running

REM 检查是否有.env文件
REM Check if .env file exists
if not exist "..\owl\.env" (
    echo 警告: 未找到owl\.env文件
    echo Warning: owl\.env file not found
    echo 请运行以下命令创建环境变量文件
    echo Please run the following command to create environment variable file:
    echo copy ..\owl\.env_template ..\owl\.env
    echo 然后编辑owl\.env文件,填写必要的API密钥
    echo Then edit owl\.env file and fill in necessary API keys
) else (
    echo 环境变量文件已存在
    echo Environment variable file exists
)

echo 所有检查完成,您的系统已准备好构建和运行OWL项目的Docker容器
echo All checks completed, your system is ready to build and run OWL project Docker container
echo 请运行以下命令构建Docker镜像:
echo Please run the following command to build Docker image:
echo %COMPOSE_CMD% build

pause
```
.container/check_docker.sh
ADDED
@@ -0,0 +1,92 @@
```bash
#!/bin/bash

# 检测操作系统类型 | Detect operating system type
OS_TYPE=$(uname -s)
echo "检测到操作系统 | Detected OS: $OS_TYPE"

# 检查Docker是否安装 | Check if Docker is installed
if ! command -v docker &> /dev/null; then
    echo "错误 | Error: Docker未安装 | Docker not installed"

    if [[ "$OS_TYPE" == "Darwin" ]]; then
        echo "在macOS上安装Docker的方法 | How to install Docker on macOS:"
        echo "1. 访问 | Visit https://docs.docker.com/desktop/install/mac-install/ 下载Docker Desktop | to download Docker Desktop"
        echo "2. 安装并启动Docker Desktop | Install and start Docker Desktop"
    elif [[ "$OS_TYPE" == "Linux" ]]; then
        echo "在Linux上安装Docker的方法 | How to install Docker on Linux:"
        echo "1. 运行以下命令 | Run the following commands:"
        echo "   sudo apt-get update"
        echo "   sudo apt-get install docker.io docker-compose"
        echo "2. 启动Docker服务 | Start Docker service:"
        echo "   sudo systemctl start docker"
        echo "   sudo systemctl enable docker"
    elif [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
        echo "在Windows上安装Docker的方法 | How to install Docker on Windows:"
        echo "1. 访问 | Visit https://docs.docker.com/desktop/install/windows-install/ 下载Docker Desktop | to download Docker Desktop"
        echo "2. 安装并启动Docker Desktop | Install and start Docker Desktop"
    fi

    exit 1
fi

echo "Docker已安装 | Docker is installed"

# 检查Docker Compose是否安装 | Check if Docker Compose is installed
if ! command -v docker-compose &> /dev/null; then
    echo "错误 | Error: Docker Compose未安装 | Docker Compose not installed"

    if [[ "$OS_TYPE" == "Darwin" ]]; then
        echo "Docker Desktop for Mac已包含Docker Compose | Docker Desktop for Mac already includes Docker Compose"
    elif [[ "$OS_TYPE" == "Linux" ]]; then
        echo "在Linux上安装Docker Compose的方法 | How to install Docker Compose on Linux:"
        echo "1. 运行以下命令 | Run the following command:"
        echo "   sudo apt-get install docker-compose"
    elif [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
        echo "Docker Desktop for Windows已包含Docker Compose | Docker Desktop for Windows already includes Docker Compose"
    fi

    exit 1
fi

echo "Docker Compose已安装 | Docker Compose is installed"

# 检查Docker是否正在运行 | Check if Docker is running
if ! docker info &> /dev/null; then
    echo "错误 | Error: Docker未运行 | Docker not running"

    if [[ "$OS_TYPE" == "Darwin" ]]; then
        echo "请启动Docker Desktop应用程序 | Please start Docker Desktop application"
    elif [[ "$OS_TYPE" == "Linux" ]]; then
        echo "请运行以下命令启动Docker服务 | Please run the following command to start Docker service:"
        echo "sudo systemctl start docker"
    elif [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
        echo "请启动Docker Desktop应用程序 | Please start Docker Desktop application"
    fi

    exit 1
fi

echo "Docker正在运行 | Docker is running"

# 检查是否有足够的磁盘空间 | Check if there is enough disk space
FREE_SPACE=$(df -h . | awk 'NR==2 {print $4}')
echo "可用磁盘空间 | Available disk space: $FREE_SPACE"

# 检查是否有.env文件 | Check if .env file exists
if [ ! -f "owl/.env" ]; then
    echo "警告 | Warning: 未找到owl/.env文件 | owl/.env file not found"
    echo "请运行以下命令创建环境变量文件 | Please run the following command to create environment variable file:"
    echo "cp owl/.env_template owl/.env"
    echo "然后编辑owl/.env文件,填写必要的API密钥 | Then edit owl/.env file and fill in necessary API keys"
else
    echo "环境变量文件已存在 | Environment variable file exists"
fi

echo "所有检查完成,您的系统已准备好构建和运行OWL项目的Docker容器 | All checks completed, your system is ready to build and run OWL project Docker container"
echo "请运行以下命令构建Docker镜像 | Please run the following command to build Docker image:"

if [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
    echo "build_docker.bat"
else
    echo "./build_docker.sh"
fi
```
.container/docker-compose.yml
ADDED
@@ -0,0 +1,34 @@
```yaml
services:
  owl:
    build:
      context: ..
      dockerfile: .container/Dockerfile
    volumes:
      # Mount the .env file so API keys are easy to configure
      - ../owl/.env:/app/owl/.env
      # Mount the data directory
      - ./data:/app/data
      # Mount cache directories to avoid repeated downloads
      - ~/.cache/pip:/root/.pip/cache
      - ~/.cache/playwright:/root/.cache/ms-playwright
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - DISPLAY=:99
      - PYTHONDONTWRITEBYTECODE=1
      - PYTHONUNBUFFERED=1
      - TERM=xterm-256color
    ports:
      - "8000:8000"
    stdin_open: true
    tty: true
    shm_size: 2gb
    # Simplified resource limits
    deploy:
      resources:
        limits:
          memory: 4G

# 定义持久化卷,用于缓存 | Define persistent volumes for caching
volumes:
  playwright-cache:
  pip-cache:
```
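Note that the top-level `volumes:` block declares the `playwright-cache` and `pip-cache` named volumes the README describes, while the `owl` service itself bind-mounts the host's `~/.cache` directories instead. A quick way to see which of the two a running setup actually uses (a sketch; assumes the file lives at `.container/docker-compose.yml`):

```bash
# Print the fully resolved compose configuration and inspect the mounts
docker compose -f .container/docker-compose.yml config

# List the named cache volumes, if Docker has ever created them
docker volume ls | grep -E 'playwright-cache|pip-cache'
```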
.container/run_in_docker.bat
ADDED
@@ -0,0 +1,181 @@
```bat
@echo off
chcp 65001 >nul
setlocal enabledelayedexpansion

REM 定义配置变量
REM Define configuration variables
set SERVICE_NAME=owl
set PYTHON_CMD=xvfb-python
set MAX_WAIT_SECONDS=60
set CHECK_INTERVAL_SECONDS=2

REM 检查参数
REM Check parameters
if "%~1"=="" (
    echo 用法: run_in_docker.bat [脚本名称] "你的问题"
    echo Usage: run_in_docker.bat [script name] "your question"
    echo 例如: run_in_docker.bat run.py "什么是人工智能?"
    echo Example: run_in_docker.bat run.py "What is artificial intelligence?"
    echo 或者: run_in_docker.bat run_deepseek_example.py "什么是人工智能?"
    echo Or: run_in_docker.bat run_deepseek_example.py "What is artificial intelligence?"
    echo 如果不指定脚本名称,默认使用 run.py
    echo If script name is not specified, run.py will be used by default
    exit /b 1
)

REM 判断第一个参数是否是脚本名称
REM Determine if the first parameter is a script name
set SCRIPT_NAME=%~1
set QUERY=%~2

if "!SCRIPT_NAME:~-3!"==".py" (
    REM 如果提供了第二个参数,则为查询内容
    REM If a second parameter is provided, it's the query content
    if "!QUERY!"=="" (
        echo 请提供查询参数,例如: run_in_docker.bat !SCRIPT_NAME! "你的问题"
        echo Please provide query parameter, e.g.: run_in_docker.bat !SCRIPT_NAME! "your question"
        exit /b 1
    )
) else (
    REM 如果第一个参数不是脚本名称,则默认使用 run.py
    REM If the first parameter is not a script name, use run.py by default
    set QUERY=!SCRIPT_NAME!
    set SCRIPT_NAME=run.py
)

REM 检查脚本是否存在
REM Check if the script exists
if not exist "..\owl\!SCRIPT_NAME!" (
    echo 错误: 脚本 '..\owl\!SCRIPT_NAME!' 不存在
    echo Error: Script '..\owl\!SCRIPT_NAME!' does not exist
    echo 可用的脚本有:
    echo Available scripts:
    dir /b ..\owl\*.py | findstr /v "__"
    exit /b 1
)

echo 使用脚本: !SCRIPT_NAME!
echo Using script: !SCRIPT_NAME!
echo 查询内容: !QUERY!
echo Query content: !QUERY!

REM 优先检查新版 docker compose 命令
REM Check new docker compose command first
docker compose version >nul 2>nul
if %ERRORLEVEL% EQU 0 (
    echo 使用新版 docker compose 命令
    echo Using new docker compose command
    set COMPOSE_CMD=docker compose
) else (
    REM 如果新版不可用,检查旧版 docker-compose
    REM If new version is not available, check old docker-compose
    where docker-compose >nul 2>nul
    if not errorlevel 1 (
        echo 使用旧版 docker-compose 命令
        echo Using old docker-compose command
        set COMPOSE_CMD=docker-compose
    ) else (
        echo 错误: Docker Compose 未安装
        echo Error: Docker Compose not installed
        echo 请确保 Docker Desktop 已正确安装
        echo Please make sure Docker Desktop is properly installed
        pause
        exit /b 1
    )
)

REM 从docker-compose.yml获取服务名称(如果文件存在)
REM Get service name from docker-compose.yml (if file exists)
if exist "docker-compose.yml" (
    for /f "tokens=*" %%a in ('findstr /r "^  [a-zA-Z0-9_-]*:" docker-compose.yml') do (
        set line=%%a
        set service=!line:~2,-1!
        if not "!service!"=="" (
            REM 使用第一个找到的服务名称
            REM Use the first service name found
            set SERVICE_NAME=!service!
            echo 从docker-compose.yml检测到服务名称: !SERVICE_NAME!
            echo Detected service name from docker-compose.yml: !SERVICE_NAME!
            goto :found_service
        )
    )
)
:found_service

REM 确保Docker容器正在运行
REM Ensure Docker container is running
%COMPOSE_CMD% ps | findstr "!SERVICE_NAME!.*Up" > nul
if errorlevel 1 (
    echo 启动Docker容器...
    echo Starting Docker container...
    %COMPOSE_CMD% up -d

    REM 使用循环检查容器是否就绪
    REM Use loop to check if container is ready
    echo 等待容器启动...
    echo Waiting for container to start...
    set /a total_wait=0

:wait_loop
    timeout /t !CHECK_INTERVAL_SECONDS! /nobreak > nul
    set /a total_wait+=!CHECK_INTERVAL_SECONDS!

    %COMPOSE_CMD% ps | findstr "!SERVICE_NAME!.*Up" > nul
    if errorlevel 1 (
        if !total_wait! LSS !MAX_WAIT_SECONDS! (
            echo 容器尚未就绪,已等待!total_wait!秒,继续等待...
            echo Container not ready yet, waited for !total_wait! seconds, continuing to wait...
            goto :wait_loop
        ) else (
            echo 错误:容器启动超时,已等待!MAX_WAIT_SECONDS!秒
            echo Error: Container startup timeout, waited for !MAX_WAIT_SECONDS! seconds
            echo 请检查Docker容器状态:%COMPOSE_CMD% ps
            echo Please check Docker container status: %COMPOSE_CMD% ps
            exit /b 1
        )
    ) else (
        echo 容器已就绪,共等待了!total_wait!秒
        echo Container is ready, waited for !total_wait! seconds in total
    )
)

REM 检查容器中是否存在xvfb-python命令
REM Check if xvfb-python command exists in container
echo 检查容器中的命令...
echo Checking commands in container...
%COMPOSE_CMD% exec -T !SERVICE_NAME! which !PYTHON_CMD! > nul 2>&1
if errorlevel 1 (
    echo 警告:容器中未找到!PYTHON_CMD!命令,尝试使用python替代
    echo Warning: !PYTHON_CMD! command not found in container, trying to use python instead
    set PYTHON_CMD=python

    REM 检查python命令是否存在
    REM Check if python command exists
    %COMPOSE_CMD% exec -T !SERVICE_NAME! which python > nul 2>&1
    if errorlevel 1 (
        echo 错误:容器中未找到python命令
        echo Error: python command not found in container
        echo 请检查容器配置
        echo Please check container configuration
        exit /b 1
    )
)

REM 在容器中运行指定的脚本,传递查询参数
REM Run the specified script in container, passing query parameter
echo 在Docker容器中使用!PYTHON_CMD!运行脚本...
echo Running script in Docker container using !PYTHON_CMD!...

REM 修改执行命令,按照README中的方式执行
REM Modify execution command according to README
%COMPOSE_CMD% exec -T !SERVICE_NAME! bash -c "cd .. && source .venv/bin/activate && cd owl && !PYTHON_CMD! !SCRIPT_NAME! \"!QUERY!\""

if %ERRORLEVEL% EQU 0 (
    echo 查询完成!
    echo Query completed!
) else (
    echo 查询执行失败,请检查错误信息。
    echo Query execution failed, please check error messages.
)

pause
```
.container/run_in_docker.sh
ADDED
@@ -0,0 +1,135 @@
#!/bin/bash

# 定义配置变量 | Define configuration variables
SERVICE_NAME="owl"
PYTHON_CMD="xvfb-python"
MAX_WAIT_SECONDS=60
CHECK_INTERVAL_SECONDS=2

# 检测操作系统类型 | Detect operating system type
OS_TYPE=$(uname -s)
echo "检测到操作系统 | Detected operating system: $OS_TYPE"

# 检查是否提供了查询参数 | Check if query parameters are provided
if [ $# -lt 1 ]; then
    echo "用法 | Usage: ./run_in_docker.sh [脚本名称 | script name] '你的问题 | your question'"
    echo "例如 | Example: ./run_in_docker.sh run.py '什么是人工智能? | What is artificial intelligence?'"
    echo "或者 | Or: ./run_in_docker.sh run_deepseek_example.py '什么是人工智能? | What is artificial intelligence?'"
    echo "如果不指定脚本名称,默认使用 run.py | If script name is not specified, run.py will be used by default"
    exit 1
fi

# 判断第一个参数是否是脚本名称 | Determine if the first parameter is a script name
if [[ $1 == *.py ]]; then
    SCRIPT_NAME="$1"
    # 如果提供了第二个参数,则为查询内容 | If a second parameter is provided, it's the query content
    if [ $# -ge 2 ]; then
        QUERY="$2"
    else
        echo "请提供查询参数,例如 | Please provide query parameter, e.g.: ./run_in_docker.sh $SCRIPT_NAME '你的问题 | your question'"
        exit 1
    fi
else
    # 如果第一个参数不是脚本名称,则默认使用 run.py | If the first parameter is not a script name, use run.py by default
    SCRIPT_NAME="run.py"
    QUERY="$1"
fi

# 检查脚本是否存在 | Check if the script exists
if [ ! -f "../owl/$SCRIPT_NAME" ]; then
    echo "错误 | Error: 脚本 | Script '../owl/$SCRIPT_NAME' 不存在 | does not exist"
    echo "可用的脚本有 | Available scripts:"
    if [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
        find ../owl -name "*.py" | grep -v "__" | sed 's/\\/\//g'
    else
        ls -1 ../owl/*.py | grep -v "__"
    fi
    exit 1
fi

echo "使用脚本 | Using script: $SCRIPT_NAME"
echo "查询内容 | Query content: $QUERY"

# 从docker-compose.yml获取服务名称(如果文件存在) | Get service name from docker-compose.yml (if file exists)
if [ -f "docker-compose.yml" ]; then
    DETECTED_SERVICE=$(grep -E "^  [a-zA-Z0-9_-]*:" docker-compose.yml | head -1 | sed 's/^  \(.*\):.*/\1/')
    if [ ! -z "$DETECTED_SERVICE" ]; then
        SERVICE_NAME="$DETECTED_SERVICE"
        echo "从docker-compose.yml检测到服务名称 | Detected service name from docker-compose.yml: $SERVICE_NAME"
    fi
fi

# 检查Docker Compose命令 | Check Docker Compose command
if command -v docker-compose &> /dev/null; then
    COMPOSE_CMD="docker-compose"
elif docker compose version &> /dev/null; then
    COMPOSE_CMD="docker compose"
else
    echo "错误 | Error: 未找到Docker Compose命令 | Docker Compose command not found"
    exit 1
fi

# 确保Docker容器正在运行 | Ensure Docker container is running
CONTAINER_RUNNING=$($COMPOSE_CMD ps | grep -c "$SERVICE_NAME.*Up" || true)
if [ "$CONTAINER_RUNNING" -eq 0 ]; then
    echo "启动Docker容器... | Starting Docker container..."
    $COMPOSE_CMD up -d

    # 使用循环检查容器是否就绪 | Use loop to check if container is ready
    echo "等待容器启动... | Waiting for container to start..."
    TOTAL_WAIT=0

    while [ $TOTAL_WAIT -lt $MAX_WAIT_SECONDS ]; do
        sleep $CHECK_INTERVAL_SECONDS
        TOTAL_WAIT=$((TOTAL_WAIT + CHECK_INTERVAL_SECONDS))

        CONTAINER_RUNNING=$($COMPOSE_CMD ps | grep -c "$SERVICE_NAME.*Up" || true)
        if [ "$CONTAINER_RUNNING" -gt 0 ]; then
            echo "容器已就绪,共等待了 $TOTAL_WAIT 秒 | Container is ready, waited for $TOTAL_WAIT seconds in total"
            break
        else
            echo "容器尚未就绪,已等待 $TOTAL_WAIT 秒,继续等待... | Container not ready yet, waited for $TOTAL_WAIT seconds, continuing to wait..."
        fi
    done

    if [ "$CONTAINER_RUNNING" -eq 0 ]; then
        echo "错误 | Error:容器启动超时,已等待 $MAX_WAIT_SECONDS 秒 | Container startup timeout, waited for $MAX_WAIT_SECONDS seconds"
        echo "请检查Docker容器状态 | Please check Docker container status:$COMPOSE_CMD ps"
        exit 1
    fi
fi

# 检查容器中是否存在指定的Python命令 | Check if specified Python command exists in container
echo "检查容器中的命令... | Checking commands in container..."
if ! $COMPOSE_CMD exec -T $SERVICE_NAME which $PYTHON_CMD &> /dev/null; then
    echo "警告 | Warning:容器中未找到 $PYTHON_CMD 命令,尝试使用python替代 | $PYTHON_CMD command not found in container, trying to use python instead"
    PYTHON_CMD="python"

    # 检查python命令是否存在 | Check if python command exists
    if ! $COMPOSE_CMD exec -T $SERVICE_NAME which python &> /dev/null; then
        echo "错误 | Error:容器中未找到python命令 | python command not found in container"
        echo "请检查容器配置 | Please check container configuration"
        exit 1
    fi
fi

# 在容器中运行指定的脚本,传递查询参数 | Run the specified script in container, passing query parameter
echo "在Docker容器中使用 $PYTHON_CMD 运行脚本... | Running script in Docker container using $PYTHON_CMD..."

# 根据操作系统类型执行不同的命令 | Execute different commands based on operating system type
if [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
    # Windows可能需要特殊处理引号 | Windows may need special handling for quotes
    winpty $COMPOSE_CMD exec -T $SERVICE_NAME bash -c "cd .. && source .venv/bin/activate && cd owl && $PYTHON_CMD $SCRIPT_NAME \"$QUERY\""
    RESULT=$?
else
    # macOS 或 Linux | macOS or Linux
    $COMPOSE_CMD exec -T $SERVICE_NAME bash -c "cd .. && source .venv/bin/activate && cd owl && $PYTHON_CMD $SCRIPT_NAME \"$QUERY\""
    RESULT=$?
fi

# 检查命令执行结果 | Check command execution result
if [ $RESULT -eq 0 ]; then
    echo "查询完成! | Query completed!"
else
    echo "查询执行失败,请检查错误信息。 | Query execution failed, please check error messages."
fi
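Once the image has been built (see `build_docker.sh` above), the wrapper can be invoked as its own usage message describes; the queries below are placeholders, and on Windows `run_in_docker.bat` accepts the same arguments:

```bash
# Run the default script (run.py) with a query
./run_in_docker.sh "What is artificial intelligence?"

# Or name a specific script explicitly
./run_in_docker.sh run_deepseek_example.py "What is artificial intelligence?"
```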
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,29 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: 'v0.7.4'
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix, --show-fixes]
        exclude: ^docs/cookbooks/  # Ignore files under docs/cookbooks
      - id: ruff-format
        exclude: ^docs/cookbooks/  # Ignore files under docs/cookbooks

  - repo: local
    hooks:
      - id: mypy
        name: Check mypy
        entry: mypy --namespace-packages -p owl
        language: python
        types: [python]
        pass_filenames: false
        require_serial: true
        exclude: ^docs/cookbooks/  # Ignore files under docs/cookbooks

  - repo: local
    hooks:
      - id: check-license
        name: Check License
        entry: python licenses/update_license.py . licenses/license_template.txt
        language: system
        types: [python]
        exclude: ^docs/cookbooks/  # Ignore files under docs/cookbooks
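These hooks wire Ruff (lint and format), mypy, and the repository's license checker into git. Enabling them locally follows standard pre-commit usage (a generic sketch, not commands prescribed by this repo):

```bash
pip install pre-commit
pre-commit install           # register the hooks with git
pre-commit run --all-files   # optionally run every hook across the repo once
```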
README.md
CHANGED
@@ -64,89 +64,282 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
- [📋 Table of Contents](#-table-of-contents)
- [🔥 News](#-news)
- [🎬 Demo Video](#-demo-video)
- [✨️ Core Features](#-core-features)
- [🛠️ Installation](#️-installation)
  - [**Clone the Github repository**](#clone-the-github-repository)
  - [**Set up Environment**](#set-up-environment)
  - [**Install Dependencies**](#install-dependencies)
  - [**Setup Environment Variables**](#setup-environment-variables)
  - [**Running with Docker**](#running-with-docker)
- [🚀 Quick Start](#-quick-start)
- [🧰 Toolkits and Capabilities](#-toolkits-and-capabilities)
- [🌐 Web Interface](#-web-interface)
- [🧪 Experiments](#-experiments)
- [⏱️ Future Plans](#️-future-plans)
- [📄 License](#-license)
- [🖊️ Cite](#️-cite)
- [🤝 Contributing](#-contributing)
- [🔥 Community](#-community)
- [❓ FAQ](#-faq)
- [📚 Exploring CAMEL Dependency](#-exploring-camel-dependency)
- [⭐ Star History](#-star-history)


# 🔥 News

<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
  <h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
    🌟🌟🌟 <b>COMMUNITY CALL FOR USE CASES!</b> 🌟🌟🌟
  </h3>
  <p style="font-size: 1.1em; margin: 10px 0;">
    We're inviting the community to contribute innovative use cases for OWL! <br>
    The <b>top ten submissions</b> will receive special community gifts and recognition.
  </p>
  <p>
    <a href="https://github.com/camel-ai/owl/tree/main/community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md" style="background-color: #d81b60; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">Learn More & Submit</a>
  </p>
  <p style="margin: 5px 0;">
    Submission deadline: <b>March 31, 2025</b>
  </p>
</div>

- **[2025.03.12]**: Added Bocha search to SearchToolkit, integrated the Volcano Engine model platform, and enhanced Azure and OpenAI-compatible models with structured output and tool calling.
- **[2025.03.11]**: We added MCPToolkit, FileWriteToolkit, and TerminalToolkit to enhance OWL agents with MCP tool calling, file writing capabilities, and terminal command execution.
- **[2025.03.09]**: We added a web-based user interface that makes it easier to interact with the system.
- **[2025.03.07]**: We open-sourced the codebase of the 🦉 OWL project.
- **[2025.03.03]**: OWL achieved the #1 position among open-source frameworks on the GAIA benchmark with a score of 58.18.


# 🎬 Demo Video

https://github.com/user-attachments/assets/2a2a825d-39ea-45c5-9ba1-f9d58efbc372

https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4

# ✨️ Core Features

- **Real-time Information Retrieval**: Leverage Wikipedia, Google Search, and other online sources for up-to-date information.
- **Multimodal Processing**: Support for handling internet or local videos, images, and audio data.
- **Browser Automation**: Utilize the Playwright framework to simulate browser interactions, including scrolling, clicking, input handling, downloading, navigation, and more.
- **Document Parsing**: Extract content from Word, Excel, PDF, and PowerPoint files, converting them into text or Markdown format.
- **Code Execution**: Write and execute Python code using an interpreter.
- **Built-in Toolkits**: Access to a comprehensive set of built-in toolkits, including:
  - **Model Context Protocol (MCP)**: A universal protocol layer that standardizes AI model interactions with various tools and data sources
  - **Core Toolkits**: ArxivToolkit, AudioAnalysisToolkit, CodeExecutionToolkit, DalleToolkit, DataCommonsToolkit, ExcelToolkit, GitHubToolkit, GoogleMapsToolkit, GoogleScholarToolkit, ImageAnalysisToolkit, MathToolkit, NetworkXToolkit, NotionToolkit, OpenAPIToolkit, RedditToolkit, SearchToolkit, SemanticScholarToolkit, SymPyToolkit, VideoAnalysisToolkit, WeatherToolkit, BrowserToolkit, and many more for specialized tasks

# 🛠️ Installation

OWL supports multiple installation methods to fit your workflow preferences. Choose the option that works best for you.

## Option 1: Using uv (Recommended)

```bash
# Clone github repo
git clone https://github.com/camel-ai/owl.git

# Change directory into project directory
cd owl

# Install uv if you don't have it already
pip install uv

# Create a virtual environment and install dependencies
# We support using Python 3.10, 3.11, 3.12
uv venv .venv --python=3.10

# Activate the virtual environment
# For macOS/Linux
source .venv/bin/activate
# For Windows
.venv\Scripts\activate

# Install CAMEL with all dependencies
uv pip install -e .

# Exit the virtual environment when done
deactivate
```

## Option 2: Using venv and pip

```bash
# Clone github repo
git clone https://github.com/camel-ai/owl.git

# Change directory into project directory
cd owl

# Create a virtual environment
# For Python 3.10 (also works with 3.11, 3.12)
python3.10 -m venv .venv

# Activate the virtual environment
# For macOS/Linux
source .venv/bin/activate
# For Windows
.venv\Scripts\activate

# Install from requirements.txt
pip install -r requirements.txt --use-pep517
```

## Option 3: Using conda

```bash
# Clone github repo
git clone https://github.com/camel-ai/owl.git

# Change directory into project directory
cd owl

# Create a conda environment
conda create -n owl python=3.10

# Activate the conda environment
conda activate owl

# Option 1: Install as a package (recommended)
pip install -e .

# Option 2: Install from requirements.txt
pip install -r requirements.txt --use-pep517

# Exit the conda environment when done
conda deactivate
```

## **Setup Environment Variables**

OWL requires various API keys to interact with different services. The `owl/.env_template` file contains placeholders for all necessary API keys along with links to the services where you can register for them.

### Option 1: Using a `.env` File (Recommended)

1. **Copy and Rename the Template**:
   ```bash
   cd owl
   cp .env_template .env
   ```

2. **Configure Your API Keys**:
   Open the `.env` file in your preferred text editor and insert your API keys in the corresponding fields.

   > **Note**: For the minimal example (`run_mini.py`), you only need to configure the LLM API key (e.g., `OPENAI_API_KEY`).

### Option 2: Setting Environment Variables Directly

Alternatively, you can set environment variables directly in your terminal:

- **macOS/Linux (Bash/Zsh)**:
  ```bash
  export OPENAI_API_KEY="your-openai-api-key-here"
  ```

- **Windows (Command Prompt)**:
  ```batch
  set OPENAI_API_KEY="your-openai-api-key-here"
  ```

- **Windows (PowerShell)**:
  ```powershell
  $env:OPENAI_API_KEY = "your-openai-api-key-here"
  ```

> **Note**: Environment variables set directly in the terminal will only persist for the current session.

## **Running with Docker**

```bash
# Clone the repository
git clone https://github.com/camel-ai/owl.git
cd owl

# Configure environment variables
cp owl/.env_template owl/.env
# Edit the .env file and fill in your API keys

# Option 1: Using docker-compose directly
cd .container

docker-compose up -d

# Run OWL inside the container
docker-compose exec owl bash -c "cd .. && source .venv/bin/activate && cd owl"

# Run the example demo script
xvfb-python run.py

# Option 2: Build and run using the provided scripts
cd .container
chmod +x build_docker.sh
./build_docker.sh
# Run OWL inside the container
./run_in_docker.sh "your question"
```

For more detailed Docker usage instructions, including cross-platform support, optimized configurations, and troubleshooting, please refer to [DOCKER_README.md](.container/DOCKER_README_en.md).

# 🚀 Quick Start

## Try MCP (Model Context Protocol) Integration

Experience the power of MCP by running our example that demonstrates multi-agent information retrieval and processing:

```bash
# Set up MCP servers (one-time setup)
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup

# Run the MCP example
python owl/run_mcp.py
```

This example showcases how OWL agents can seamlessly interact with file systems, web automation, and information retrieval through the MCP protocol. Check out `owl/run_mcp.py` for the full implementation.

## Basic Usage

After installation and setting up your environment variables, you can start using OWL right away:

```bash
python owl/run.py
```

## Running with Different Models

### Model Requirements

- **Tool Calling**: OWL requires models with robust tool-calling capabilities to interact with the various toolkits. Models must be able to understand tool descriptions, generate appropriate tool calls, and process tool outputs.

- **Multimodal Understanding**: For tasks involving web interaction, image analysis, or video processing, models with multimodal capabilities are required to interpret visual content and context.

#### Supported Models

For information on configuring AI models, please refer to our [CAMEL models documentation](https://docs.camel-ai.org/key_modules/models.html#supported-model-platforms-in-camel).

> **Note**: For optimal performance, we strongly recommend using OpenAI models (GPT-4 or later versions). Our experiments show that other models may result in significantly lower performance on complex tasks and benchmarks, especially those requiring advanced multimodal understanding and tool use.

OWL supports various LLM backends, though capabilities may vary depending on the model's tool-calling and multimodal abilities. You can use the following scripts to run with different models:

```bash
# Run with Qwen model
python owl/run_qwen_zh.py

# Run with Deepseek model
python owl/run_deepseek_zh.py

# Run with other OpenAI-compatible models
python owl/run_openai_compatiable_model.py

# Run with Azure OpenAI
python owl/run_azure_openai.py

# Run with Ollama
python owl/run_ollama.py
```

For a simpler version that only requires an LLM API key, you can try our minimal example:

```bash
@@ -162,35 +355,149 @@
society = construct_society(question)
answer, chat_history, token_count = run_society(society)

print(f"\033[94mAnswer: {answer}\033[0m")
```

For uploading files, simply provide the file path along with your question:

```python
# Task with a local file (e.g., file path: `tmp/example.docx`)
question = "What is in the given DOCX file? Here is the file path: tmp/example.docx"

society = construct_society(question)
answer, chat_history, token_count = run_society(society)
print(f"\033[94mAnswer: {answer}\033[0m")
```

OWL will then automatically invoke document-related tools to process the file and extract the answer.

### Example Tasks

Here are some tasks you can try with OWL:

- "Find the latest stock price for Apple Inc."
- "Analyze the sentiment of recent tweets about climate change"
- "Help me debug this Python code: [your code here]"
- "Summarize the main points from this research paper: [paper URL]"
- "Create a data visualization for this dataset: [dataset path]"

# 🧰 Toolkits and Capabilities

## Model Context Protocol (MCP)

OWL's MCP integration provides a standardized way for AI models to interact with various tools and data sources.

Try our comprehensive MCP example in `owl/run_mcp.py` to see these capabilities in action!

## Available Toolkits

> **Important**: Effective use of toolkits requires models with strong tool-calling capabilities. For multimodal toolkits (Web, Image, Video), models must also have multimodal understanding abilities.

OWL supports various toolkits, which can be customized by modifying the `tools` list in your script:

```python
# Configure toolkits
tools = [
    *BrowserToolkit(headless=False).get_tools(),  # Browser automation
    *VideoAnalysisToolkit(model=models["video"]).get_tools(),
    *AudioAnalysisToolkit().get_tools(),  # Requires OpenAI Key
    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
    *ImageAnalysisToolkit(model=models["image"]).get_tools(),
    SearchToolkit().search_duckduckgo,
    SearchToolkit().search_google,  # Comment out if unavailable
    SearchToolkit().search_wiki,
    *ExcelToolkit().get_tools(),
    *DocumentProcessingToolkit(model=models["document"]).get_tools(),
    *FileWriteToolkit(output_dir="./").get_tools(),
]
```

## Key Toolkits

Key toolkits include:

### Multimodal Toolkits (Require multimodal model capabilities)
- **BrowserToolkit**: Browser automation for web interaction and navigation
- **VideoAnalysisToolkit**: Video processing and content analysis
- **ImageAnalysisToolkit**: Image analysis and interpretation

### Text-Based Toolkits
- **AudioAnalysisToolkit**: Audio processing (requires OpenAI API)
- **CodeExecutionToolkit**: Python code execution and evaluation
- **SearchToolkit**: Web searches (Google, DuckDuckGo, Wikipedia)
- **DocumentProcessingToolkit**: Document parsing (PDF, DOCX, etc.)

Additional specialized toolkits: ArxivToolkit, GitHubToolkit, GoogleMapsToolkit, MathToolkit, NetworkXToolkit, NotionToolkit, RedditToolkit, WeatherToolkit, and more. For a complete list, see the [CAMEL toolkits documentation](https://docs.camel-ai.org/key_modules/tools.html#built-in-toolkits).

## Customizing Your Configuration

To customize available tools:

```python
# 1. Import toolkits
from camel.toolkits import BrowserToolkit, SearchToolkit, CodeExecutionToolkit

# 2. Configure tools list
tools = [
    *BrowserToolkit(headless=True).get_tools(),
    SearchToolkit().search_wiki,
    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
]

# 3. Pass to assistant agent
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
```

Selecting only the necessary toolkits optimizes performance and reduces resource usage.

# 🌐 Web Interface

OWL includes an intuitive web-based user interface that makes it easier to interact with the system.

## Starting the Web UI

```bash
# Start the Chinese version
python run_app_zh.py

# Start the English version
python run_app.py
```

## Features

- **Easy Model Selection**: Choose between different models (OpenAI, Qwen, DeepSeek, etc.)
- **Environment Variable Management**: Configure your API keys and other settings directly from the UI
- **Interactive Chat Interface**: Communicate with OWL agents through a user-friendly interface
- **Task History**: View the history and results of your interactions

The web interface is built using Gradio and runs locally on your machine. No data is sent to external servers beyond what's required for the model API calls you configure.

# 🧪 Experiments

To reproduce OWL's GAIA benchmark score of 58.18:

1. Switch to the `gaia58.18` branch:
   ```bash
   git checkout gaia58.18
   ```

2. Run the evaluation script:
   ```bash
   python run_gaia_roleplaying.py
   ```

This will execute the same configuration that achieved our top-ranking performance on the GAIA benchmark.

# ⏱️ Future Plans

We're continuously working to improve OWL. Here's what's on our roadmap:

- [ ] Write a technical blog post detailing our exploration and insights in multi-agent collaboration on real-world tasks
- [ ] Enhance the toolkit ecosystem with more specialized tools for domain-specific tasks
- [ ] Develop more sophisticated agent interaction patterns and communication protocols
- [ ] Improve performance on complex multi-step reasoning tasks

# 📄 License

@@ -211,17 +518,55 @@
}
```

# 🤝 Contributing

We welcome contributions from the community! Here's how you can help:

1. Read our [Contribution Guidelines](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md)
2. Check [open issues](https://github.com/camel-ai/camel/issues) or create new ones
3. Submit pull requests with your improvements

**Current Issues Open for Contribution:**
- [#1857](https://github.com/camel-ai/camel/issues/1857)
- [#1770](https://github.com/camel-ai/camel/issues/1770)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#1537](https://github.com/camel-ai/camel/issues/1537)

To take on an issue, simply leave a comment stating your interest.

# 🔥 Community

Join us ([*Discord*](https://discord.camel-ai.org/) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of the scaling laws of agents.

Join us for further discussions!
<!--  -->


# ❓ FAQ

**Q: Why don't I see Chrome running locally after starting the example script?**

A: If OWL determines that a task can be completed using non-browser tools (such as search or code execution), the browser will not be launched. The browser window will only appear when OWL determines that browser-based interaction is necessary.

**Q: Which Python version should I use?**

A: OWL supports Python 3.10, 3.11, and 3.12.

**Q: How can I contribute to the project?**

A: See our [Contributing](#-contributing) section for details on how to get involved. We welcome contributions of all kinds, from code improvements to documentation updates.

# 📚 Exploring CAMEL Dependency

OWL is built on top of the [CAMEL](https://github.com/camel-ai/camel) framework. Here's how you can explore the CAMEL source code and understand how it works with OWL:

## Accessing CAMEL Source Code

```bash
# Clone the CAMEL repository
git clone https://github.com/camel-ai/camel.git
cd camel
```

# ⭐ Star History

README_zh.md
CHANGED
@@ -1,6 +1,6 @@
1 |
<h1 align="center">
|
2 |
🦉 OWL: Optimized Workforce Learning for General Multi-Agent Assistance in Real-World Task Automation
|
3 |
+
🦉 OWL: 优化劳动力学习的通用智能体,用于处理现实世界的自动化任务
|
4 |
</h1>
|
5 |
|
6 |
|
|
|
65 |
- [📋 目录](#-目录)
|
66 |
- [🔥 新闻](#-新闻)
|
67 |
- [🎬 演示视频](#-演示视频)
|
68 |
+
- [✨️ 核心功能](#-核心功能)
|
69 |
- [🛠️ 安装](#️-安装)
|
70 |
+
- [**选项1:使用 uv(推荐)**](#选项1使用-uv推荐)
|
71 |
+
- [**选项2:使用 venv 和 pip**](#选项2使用-venv-和-pip)
|
72 |
+
- [**选项3:使用 conda**](#选项3使用-conda)
|
73 |
- [**设置环境变量**](#设置环境变量)
|
74 |
+
- [**使用Docker运行**](#使用docker运行)
|
75 |
- [🚀 快速开始](#-快速开始)
|
76 |
+
- [🧰 工具包与功能](#-工具包与功能)
|
77 |
+
- [🌐 网页界面](#-网页界面)
|
78 |
- [🧪 实验](#-实验)
|
79 |
- [⏱️ 未来计划](#️-未来计划)
|
80 |
- [📄 许可证](#-许可证)
|
81 |
- [🖊️ 引用](#️-引用)
|
82 |
+
- [🤝 贡献](#-贡献)
|
83 |
- [🔥 社区](#-社区)
|
84 |
- [❓ 常见问题](#-常见问题)
|
85 |
+
- [📚 探索 CAMEL 依赖](#-探索-camel-依赖)
|
86 |
+
- [⭐ Star History](#-star-history)
|
87 |
|
88 |
|
89 |
# 🔥 新闻
|
90 |
|
91 |
+
<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
|
92 |
+
<h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
|
93 |
+
🌟🌟🌟 <b>OWL社区用例征集令!</b> 🌟🌟🌟
|
94 |
+
</h3>
|
95 |
+
<p style="font-size: 1.1em; margin: 10px 0;">
|
96 |
+
我们请社区成员贡献创新的OWL用例!<br>
|
97 |
+
<b>前十名提交</b>将获得特别社区礼物和认可。
|
98 |
+
</p>
|
99 |
+
<p>
|
100 |
+
<a href="https://github.com/camel-ai/owl/tree/main/community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md" style="background-color: #d81b60; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">了解更多并提交</a>
|
101 |
+
</p>
|
102 |
+
<p style="margin: 5px 0;">
|
103 |
+
提交截止日期:<b>2025年3月31日</b>
|
104 |
+
</p>
|
105 |
+
</div>
|
106 |
+
|
107 |
+
- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能,集成了火山引擎模型平台,并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
|
108 |
+
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit,增强了 OWL Agent 的 MCP(模型上下文协议)集成、文件写入能力和终端命令执行功能。MCP 作为一个通用协议层,标准化了 AI 模型与各种数据源和工具的交互方式。
|
109 |
+
- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。
|
110 |
- **[2025.03.07]**: 我们开源了 🦉 OWL 项目的代码库。
|
111 |
+
- **[2025.03.03]**: OWL 在 GAIA 基准测试中取得 58.18 平均分,在开源框架中排名第一!
|
112 |
|
113 |
# 🎬 演示视频
|
114 |
|
|
|
116 |
|
117 |
https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
|
118 |
|
119 |
+
# ✨️ 核心功能
|
120 |
+
|
121 |
+
- **在线搜索**:使用维基百科、谷歌搜索等,进行实时信息检索
|
122 |
+
- **多模态处理**:支持互联网或本地视频、图片、语音处理
|
123 |
+
- **浏览器操作**:借助Playwright框架开发浏览器模拟交互,支持页面滚动、点击、输入、下载、历史回退等功能
|
124 |
+
- **文件解析**:word、excel、PDF、PowerPoint信息提取,内容转文本/Markdown
|
125 |
+
- **代码执行**:编写python代码,并使用解释器运行
|
126 |
+
- **丰富工具包**:提供丰富的工具包,包括ArxivToolkit(学术论文检索)、AudioAnalysisToolkit(音频分析)、CodeExecutionToolkit(代码执行)、DalleToolkit(图像生成)、DataCommonsToolkit(数据共享)、ExcelToolkit(Excel处理)、GitHubToolkit(GitHub交互)、GoogleMapsToolkit(地图服务)、GoogleScholarToolkit(学术搜索)、ImageAnalysisToolkit(图像分析)、MathToolkit(数学计算)、NetworkXToolkit(图形分析)、NotionToolkit(Notion交互)、OpenAPIToolkit(API操作)、RedditToolkit(Reddit交互)、SearchToolkit(搜索服务)、SemanticScholarToolkit(语义学术搜索)、SymPyToolkit(符号计算)、VideoAnalysisToolkit(视频分析)、WeatherToolkit(天气查询)、BrowserToolkit(网页交互)等多种专业工具,满足各类特定任务需求。
|
127 |
+
|
128 |
# 🛠️ 安装

## 选项1:使用 uv(推荐)

```bash
# 克隆 GitHub 仓库
git clone https://github.com/camel-ai/owl.git

# 进入项目目录
cd owl

# 如果你还没有安装 uv,请先安装
pip install uv

# 创建虚拟环境并安装依赖
# 我们支持使用 Python 3.10、3.11、3.12
uv venv .venv --python=3.10

# 激活虚拟环境
# 对于 macOS/Linux
source .venv/bin/activate
# 对于 Windows
.venv\Scripts\activate

# 安装 CAMEL 及其所有依赖
uv pip install -e .

# 完成后退出虚拟环境
deactivate
```

## 选项2:使用 venv 和 pip

```bash
# 克隆 GitHub 仓库
git clone https://github.com/camel-ai/owl.git

# 进入项目目录
cd owl

# 创建虚拟环境
# 对于 Python 3.10(也适用于 3.11、3.12)
python3.10 -m venv .venv

# 激活虚拟环境
# 对于 macOS/Linux
source .venv/bin/activate
# 对于 Windows
.venv\Scripts\activate

# 从 requirements.txt 安装
pip install -r requirements.txt --use-pep517
```

## 选项3:使用 conda

```bash
# 克隆 GitHub 仓库
git clone https://github.com/camel-ai/owl.git

# 进入项目目录
cd owl

# 创建 conda 环境
conda create -n owl python=3.10

# 激活 conda 环境
conda activate owl

# 选项1:作为包安装(推荐)
pip install -e .

# 选项2:从 requirements.txt 安装
pip install -r requirements.txt --use-pep517

# 完成后退出 conda 环境
conda deactivate
```

## **设置环境变量**

OWL 需要各种 API 密钥来与不同的服务进行交互。`owl/.env_template` 文件包含了所有必要 API 密钥的占位符,以及可以注册这些服务的链接。

### 选项 1:使用 `.env` 文件(推荐)

1. **复制并重命名模板**:
   ```bash
   cd owl
   cp .env_template .env
   ```

2. **配置你的 API 密钥**:
   在你喜欢的文本编辑器中打开 `.env` 文件,并在相应字段中插入你的 API 密钥。

   > **注意**:对于最小示例(`run_mini.py`),你只需要配置 LLM API 密钥(例如,`OPENAI_API_KEY`)。

### 选项 2:直接设置环境变量

或者,你可以直接在终端中设置环境变量:

- **macOS/Linux (Bash/Zsh)**:
  ```bash
  export OPENAI_API_KEY="你的-openai-api-密钥"
  ```

- **Windows (命令提示符)**:
  ```batch
  set OPENAI_API_KEY="你的-openai-api-密钥"
  ```

- **Windows (PowerShell)**:
  ```powershell
  $env:OPENAI_API_KEY = "你的-openai-api-密钥"
  ```

> **注意**:直接在终端中设置的环境变量仅在当前会话中有效。

## **使用Docker运行**

如果您希望使用Docker运行OWL项目,我们提供了完整的Docker支持:

```bash
# 克隆仓库
git clone https://github.com/camel-ai/owl.git
cd owl

# 配置环境变量
cp owl/.env_template owl/.env
# 编辑.env文件,填入您的API密钥

# 选项1:直接使用docker-compose
cd .container

docker-compose up -d

# 在容器中运行OWL
docker-compose exec owl bash -c "cd .. && source .venv/bin/activate && cd owl"

# 运行示例演示脚本
xvfb-python run.py

# 选项2:使用提供的脚本构建和运行
cd .container
chmod +x build_docker.sh
./build_docker.sh
# 在容器中运行OWL
./run_in_docker.sh "您的问题"
```

更多详细的Docker使用说明,包括跨平台支持、优化配置和故障排除,请参阅 [DOCKER_README.md](.container/DOCKER_README.md)。

# 🚀 快速开始

## 尝试 MCP(模型上下文协议)集成

体验 MCP 的强大功能,运行我们的示例来展示多智能体信息检索和处理:

```bash
# 设置 MCP 服务器(仅需一次性设置)
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup

# 运行 MCP 示例
python owl/run_mcp.py
```

这个示例展示了 OWL 智能体如何通过 MCP 协议无缝地与文件系统、网页自动化和信息检索进行交互。查看 `owl/run_mcp.py` 了解完整实现。

## 基本用法

运行以下示例:

@@ -145,6 +307,39 @@
python owl/run_mini.py
```

## 使用不同的模型

### 模型要求

- **工具调用能力**:OWL 需要具有强大工具调用能力的模型来与各种工具包交互。模型必须能够理解工具描述、生成适当的工具调用,并处理工具输出。

- **多模态理解能力**:对于涉及网页交互、图像分析或视频处理的任务,需要具备多模态能力的模型来解释视觉内容和上下文。

#### 支持的模型

有关配置模型的信息,请参阅我们的 [CAMEL 模型文档](https://docs.camel-ai.org/key_modules/models.html#supported-model-platforms-in-camel)。

> **注意**:为获得最佳性能,我们强烈推荐使用 OpenAI 模型(GPT-4 或更高版本)。我们的实验表明,其他模型在复杂任务和基准测试上可能表现明显较差,尤其是那些需要多模态理解和工具使用的任务。

OWL 支持多种 LLM 后端,但功能可能因模型的工具调用和多模态能力而异。您可以使用以下脚本来运行不同的模型:

```bash
# 使用 Qwen 模型运行
python owl/run_qwen_zh.py

# 使用 Deepseek 模型运行
python owl/run_deepseek_zh.py

# 使用其他 OpenAI 兼容模型运行
python owl/run_openai_compatiable_model.py

# 使用 Azure OpenAI模型运行
python owl/run_azure_openai.py

# 使用 Ollama 运行
python owl/run_ollama.py
```

你可以通过修改 `run.py` 脚本来运行自己的任务:

```python
@@ -154,14 +349,119 @@
society = construct_society(question)
answer, chat_history, token_count = run_society(society)

print(f"\033[94mAnswer: {answer}\033[0m")
```

上传文件时,只需提供文件路径和问题:

```python
# 处理本地文件(例如,文件路径为 `tmp/example.docx`)
question = "给定的 DOCX 文件中有什么内容?文件路径如下:tmp/example.docx"

society = construct_society(question)
answer, chat_history, token_count = run_society(society)

print(f"答案:{answer}")
```

OWL 将自动调用与文档相关的工具来处理文件并提取答案。

你可以尝试以下示例任务:
- "查询苹果公司的最新股票价格"
- "分析关于气候变化的最新推文情绪"
- "帮我调试这段 Python 代码:[在此粘贴你的代码]"
- "总结这篇研究论文的主要观点:[论文URL]"

# 🧰 工具包与功能

## 模型上下文协议(MCP)

OWL 的 MCP 集成为 AI 模型与各种工具和数据源的交互提供了标准化的方式。

查看我们的综合示例 `owl/run_mcp.py` 来体验这些功能!

## 可用工具包

> **重要提示**:有效使用工具包需要具备强大工具调用能力的模型。对于多模态工具包(Web、图像、视频),模型还必须具备多模态理解能力。

OWL支持多种工具包,可通过修改脚本中的`tools`列表进行自定义:

```python
# 配置工具包
tools = [
    *BrowserToolkit(headless=False).get_tools(),  # 浏览器自动化
    *VideoAnalysisToolkit(model=models["video"]).get_tools(),
    *AudioAnalysisToolkit().get_tools(),  # 需要OpenAI API密钥
    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
    *ImageAnalysisToolkit(model=models["image"]).get_tools(),
    SearchToolkit().search_duckduckgo,
    SearchToolkit().search_google,  # 如果不可用请注释
    SearchToolkit().search_wiki,
    *ExcelToolkit().get_tools(),
    *DocumentProcessingToolkit(model=models["document"]).get_tools(),
    *FileWriteToolkit(output_dir="./").get_tools(),
]
```

## 主要工具包

关键工具包包括:

### 多模态工具包(需要模型具备多模态能力)
- **BrowserToolkit**:浏览器自动化,用于网页交互和导航
- **VideoAnalysisToolkit**:视频处理和内容分析
- **ImageAnalysisToolkit**:图像分析和解释

### 基于文本的工具包
- **AudioAnalysisToolkit**:音频处理(需要 OpenAI API)
- **CodeExecutionToolkit**:Python 代码执行和评估
- **SearchToolkit**:网络搜索(Google、DuckDuckGo、维基百科)
- **DocumentProcessingToolkit**:文档解析(PDF、DOCX等)

其他专用工具包:ArxivToolkit、GitHubToolkit、GoogleMapsToolkit、MathToolkit、NetworkXToolkit、NotionToolkit、RedditToolkit、WeatherToolkit等。完整工具包列表请参阅[CAMEL工具包文档](https://docs.camel-ai.org/key_modules/tools.html#built-in-toolkits)。

## 自定义配置

自定义可用工具的方法:

```python
# 1. 导入工具包
from camel.toolkits import BrowserToolkit, SearchToolkit, CodeExecutionToolkit

# 2. 配置工具列表
tools = [
    *BrowserToolkit(headless=True).get_tools(),
    SearchToolkit().search_wiki,
    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
]

# 3. 传递给助手代理
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
```

选择必要的工具包可优化性能并减少资源使用。

# 🌐 网页界面

OWL 现在包含一个基于网页的用户界面,使与系统交互变得更加容易。要启动网页界面,请运行:

```bash
# 中文版本
python run_app_zh.py

# 英文版本
python run_app.py
```

网页界面提供以下功能:

- **便捷的模型选择**:选择不同的模型(OpenAI、Qwen、DeepSeek等)
- **环境变量管理**:直接从界面配置API密钥和其他设置
- **交互式聊天界面**:通过用户友好的界面与OWL智能体交流
- **任务历史**:查看交互的历史记录和结果

网页界面使用Gradio构建,在您的本地机器上运行。除了您配置的模型API调用所需的数据外,不会向外部服务器发送任何数据。

# 🧪 实验

我们提供了一个脚本用于复现 GAIA 上的实验结果。

@@ -179,10 +479,12 @@

# ⏱️ 未来计划

我们正在不断努力改进 OWL。以下是我们的路线图:

- [ ] 撰写技术博客,详细介绍我们在现实任务中多智能体协作方面的探索与见解
- [ ] 通过引入更多针对特定领域任务的专业工具,进一步完善工具生态系统
- [ ] 开发更复杂的智能体交互模式和通信协议
- [ ] 提高复杂多步推理任务的性能

# 📄 许可证

@@ -203,7 +505,25 @@
}
```

# 🤝 贡献

我们欢迎社区的贡献!以下是您可以提供帮助的方式:

1. 阅读我们的[贡献指南](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md)
2. 查看[开放的问题](https://github.com/camel-ai/camel/issues)或创建新的问题
3. 提交包含您改进的拉取请求

**当前开放贡献的问题:**
- [#1857](https://github.com/camel-ai/camel/issues/1857)
- [#1770](https://github.com/camel-ai/camel/issues/1770)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#1537](https://github.com/camel-ai/camel/issues/1537)

要认领一个问题,只需在该问题下留言表明您的兴趣即可。

# 🔥 社区

加入我们的 ([*Discord*](https://discord.camel-ai.org/) 或 [*微信*](https://ghli.org/camel/wechat.png)) 社区,一起探索智能体扩展规律的边界。

加入我们,参与更多讨论!
<!--  -->


@@ -211,10 +531,33 @@

# ❓ 常见问题

**Q: 为什么启动示例脚本后,我没有看到本地运行Chrome浏览器?**

A: 当OWL判断某个任务可以使用非浏览器工具(如搜索、代码分析等)完成时,浏览器就不会启动。只有在判断需要使用浏览器工具的时候,本地才会弹出浏览器窗口,并进行浏览器模拟交互。

**Q: 我应该使用哪个Python版本?**

A: OWL支持Python 3.10、3.11和3.12。为了与所有依赖项获得最佳兼容性,我们推荐使用Python 3.10。

**Q: 我如何为项目做贡献?**

A: 请参阅我们的[贡献](#-贡献)部分,了解如何参与的详细信息。我们欢迎各种形式的贡献,从代码改进到文档更新。

# 📚 探索 CAMEL 依赖

OWL 是基于 [CAMEL](https://github.com/camel-ai/camel) 框架构建的。以下是如何探索 CAMEL 源代码并了解其与 OWL 协同工作的方式:

## 访问 CAMEL 源代码

```bash
# 克隆 CAMEL 仓库
git clone https://github.com/camel-ai/camel.git
cd camel
```

# ⭐ Star History

[](https://star-history.com/#camel-ai/owl&Date)

[docs-image]: https://img.shields.io/badge/Documentation-EB3ECC
[docs-url]: https://camel-ai.github.io/camel/index.html
community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md
ADDED
@@ -0,0 +1,175 @@
# 🦉 OWL Community Call for Use Cases
|
2 |
+
# 🦉 OWL 社区用例征集令
|
3 |
+
|
4 |
+
<div align="center">
|
5 |
+
|
6 |
+
[![Documentation][docs-image]][docs-url]
|
7 |
+
[![Discord][discord-image]][discord-url]
|
8 |
+
[![X][x-image]][x-url]
|
9 |
+
[![Reddit][reddit-image]][reddit-url]
|
10 |
+
[![Wechat][wechat-image]][wechat-url]
|
11 |
+
[![Star][star-image]][star-url]
|
12 |
+
|
13 |
+
</div>
|
14 |
+
|
15 |
+
<div align="center">
|
16 |
+
<h4 align="center">
|
17 |
+
|
18 |
+
[English](#join-the-owl-community-contribute-your-use-cases) | [中文](#加入owl社区贡献您的用例)
|
19 |
+
|
20 |
+
</h4>
|
21 |
+
</div>
|
22 |
+
|
23 |
+
## Join the OWL Community: Contribute Your Use Cases!
|
24 |
+
|
25 |
+
Dear OWL Community,
|
26 |
+
|
27 |
+
We are excited to announce a special initiative to expand the capabilities and applications of the OWL framework! As the #1 ranked open-source multi-agent collaboration framework on the [GAIA benchmark](https://huggingface.co/spaces/gaia-benchmark/leaderboard), OWL is revolutionizing how AI agents collaborate to solve real-world tasks.
|
28 |
+
|
29 |
+
### 🌟 What We're Looking For
|
30 |
+
|
31 |
+
We invite you to contribute use cases that demonstrate the power and versatility of OWL in two ways:
|
32 |
+
|
33 |
+
1. **Leverage Existing Tools and Models**: Create innovative use cases using OWL's supported tools and models, then submit a PR to our repository.
|
34 |
+
2. **Extend OWL's Capabilities**: Develop new tools that expand OWL's functionality to implement your own unique use cases.
|
35 |
+
|
36 |
+
### 🏆 Community Rewards
|
37 |
+
|
38 |
+
The **top ten submissions** will receive:
|
39 |
+
- Special community gifts
|
40 |
+
- Featured promotion within the OWL community
|
41 |
+
- Recognition of your contributions and authorship
|
42 |
+
|
43 |
+
### 💡 Submission Guidelines
|
44 |
+
|
45 |
+
Your submission should include:
|
46 |
+
|
47 |
+
1. **Well-documented code**: Clear comments and instructions for running your use case
|
48 |
+
2. **Description file**: Explaining what your use case does and why it's valuable
|
49 |
+
3. **Requirements**: Any additional dependencies needed
|
50 |
+
4. **Example outputs**: Demonstrations of your use case in action
|
51 |
+
|
52 |
+
### 🔍 Evaluation Criteria
|
53 |
+
|
54 |
+
Submissions will be evaluated based on:
|
55 |
+
- **Innovation**: How creative and novel is your use case?
|
56 |
+
- **Utility**: How useful is it for real-world applications?
|
57 |
+
- **Implementation**: How well is it coded and documented?
|
58 |
+
- **Extensibility**: How easily can others build upon your work?
|
59 |
+
- **Community Engagement**: Sharing your use case on social media platforms (Zhihu, Xiaohongshu, X/Twitter, YouTube, etc.) will earn you extra points
|
60 |
+
|
61 |
+
### 📝 How to Submit
|
62 |
+
|
63 |
+
1. Fork the OWL repository
|
64 |
+
2. Create your use case in the `community_usecase/` directory
|
65 |
+
3. Submit a Pull Request with a detailed description of your contribution
|
66 |
+
4. Tag your PR with `community-use-case` (see the example flow below)
|
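A typical Git flow for such a submission might look like the following (a sketch; the branch name and directory contents are illustrative):

```bash
git checkout -b my-use-case
mkdir -p community_usecase/my_use_case
# add your code, description file, requirements, and example outputs here
git add community_usecase/my_use_case
git commit -m "Add community use case: my_use_case"
git push origin my-use-case
# then open a Pull Request and tag it with community-use-case
```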
67 |
+
|
68 |
+
### ⏰ Timeline
|
69 |
+
|
70 |
+
- Submission deadline: March 31, 2025
|
71 |
+
- Winners announcement: April 7, 2025
|
72 |
+
|
73 |
+
### 🚀 Inspiration Areas
|
74 |
+
|
75 |
+
Consider exploring use cases in:
|
76 |
+
- Data analysis and visualization
|
77 |
+
- Content creation and summarization
|
78 |
+
- Research assistance
|
79 |
+
- Educational tools
|
80 |
+
- Business process automation
|
81 |
+
- Creative applications
|
82 |
+
- Cross-modal interactions (text, image, audio, video)
|
83 |
+
|
84 |
+
### 🤝 Community Support
|
85 |
+
|
86 |
+
Need help or have questions? Join our community channels:
|
87 |
+
- [Discord](https://discord.gg/CNcNpquyDc)
|
88 |
+
- [GitHub Discussions](https://github.com/camel-ai/owl/discussions)
|
89 |
+
|
90 |
+
Let's build the future of multi-agent AI together!
|
91 |
+
|
92 |
+
---
|
93 |
+
|
94 |
+
## 加入OWL社区:贡献您的用例!
|
95 |
+
|
96 |
+
亲爱的OWL社区成员,
|
97 |
+
|
98 |
+
我们很高兴宣布一项特别计划,旨在扩展OWL框架的功能和应用!作为在[GAIA基准测试](https://huggingface.co/spaces/gaia-benchmark/leaderboard)中排名第一的开源多智能体协作框架,OWL正在彻底改变AI智能体协作解决现实任务的方式。
|
99 |
+
|
100 |
+
### 🌟 我们在寻找什么
|
101 |
+
|
102 |
+
我们邀请您通过以下两种方式贡献展示OWL强大功能和多样性的用例:
|
103 |
+
|
104 |
+
1. **利用现有工具和模型**:使用OWL支持的工具和模型创建创新用例,然后向我们的仓库提交PR。
|
105 |
+
2. **扩展OWL的功能**:开发新工具,扩展OWL的功能,实现您自己独特的用例。
|
106 |
+
|
107 |
+
### 🏆 社区奖励
|
108 |
+
|
109 |
+
**前十名**将获得:
|
110 |
+
- 特别社区礼物
|
111 |
+
- 在OWL社区内的推广展示
|
112 |
+
- 对您贡献和作者身份的认可
|
113 |
+
|
114 |
+
### 💡 提交指南
|
115 |
+
|
116 |
+
您的提交应包括:
|
117 |
+
|
118 |
+
1. **文档完善的代码**:清晰的注释和运行用例的说明
|
119 |
+
2. **描述文件**:解释您的用例做什么以及为什么它有价值
|
120 |
+
3. **依赖要求**:需要的任何额外依赖
|
121 |
+
4. **示例输出**:展示您的用例实际运行效果
|
122 |
+
|
123 |
+
### 🔍 评估标准
|
124 |
+
|
125 |
+
提交将基于以下标准进行评估:
|
126 |
+
- **创新性**:您的用例有多创新和新颖?
|
127 |
+
- **实用性**:它对现实世界应用有多大用处?
|
128 |
+
- **实现质量**:代码和文档的质量如何?
|
129 |
+
- **可扩展性**:其他人能多容易地在您的工作基础上进行扩展?
|
130 |
+
- **社区参与度**:在社交媒体平台(知乎、小红书、X/Twitter、YouTube等)分享您的用例将获得额外加分
|
131 |
+
|
132 |
+
### 📝 如何提交
|
133 |
+
|
134 |
+
1. Fork OWL仓库
|
135 |
+
2. 在`community_usecase/`目录中创建您的用例
|
136 |
+
3. 提交一个包含您贡献详细描述的Pull Request
|
137 |
+
4. 使用`community-use-case`标签标记您的PR
|
138 |
+
|
139 |
+
### ⏰ 时间线
|
140 |
+
|
141 |
+
- 提交截止日期:2025年3月31日
|
142 |
+
- 获奖者公布:2025年4月7日
|
143 |
+
|
144 |
+
### 🚀 灵感领域
|
145 |
+
|
146 |
+
考虑探索以下领域的用例:
|
147 |
+
- 数据分析和可视化
|
148 |
+
- 内容创建和摘要
|
149 |
+
- 研究辅助
|
150 |
+
- 教育工具
|
151 |
+
- 业务流程自动化
|
152 |
+
- 创意应用
|
153 |
+
- 跨模态交互(文本、图像、音频、视频)
|
154 |
+
|
155 |
+
### 🤝 社区支持
|
156 |
+
|
157 |
+
需要帮助或有问题?加入我们的社区渠道:
|
158 |
+
- [Discord](https://discord.gg/CNcNpquyDc)
|
159 |
+
- [GitHub讨论](https://github.com/camel-ai/owl/discussions)
|
160 |
+
|
161 |
+
让我们一起构建多智能体AI的未来!
|
162 |
+
|
163 |
+
<!-- Links and badges -->
|
164 |
+
[docs-image]: https://img.shields.io/badge/docs-OWL-blue
|
165 |
+
[docs-url]: https://docs.camel-ai.org/
|
166 |
+
[discord-image]: https://img.shields.io/discord/1135106975706013747?color=7289da&label=Discord&logo=discord&logoColor=white
|
167 |
+
[discord-url]: https://discord.gg/CNcNpquyDc
|
168 |
+
[x-image]: https://img.shields.io/badge/Twitter-black?logo=x
|
169 |
+
[x-url]: https://twitter.com/CamelAIOrg
|
170 |
+
[reddit-image]: https://img.shields.io/badge/Reddit-FF4500?logo=reddit&logoColor=white
|
171 |
+
[reddit-url]: https://www.reddit.com/r/camelai/
|
172 |
+
[wechat-image]: https://img.shields.io/badge/WeChat-07C160?logo=wechat&logoColor=white
|
173 |
+
[wechat-url]: https://docs.camel-ai.org/blog/2023/11/29/camel-wechat/
|
174 |
+
[star-image]: https://img.shields.io/github/stars/camel-ai/owl?style=social
|
175 |
+
[star-url]: https://github.com/camel-ai/owl
|
licenses/update_license.py
CHANGED
@@ -39,43 +39,37 @@ def update_license_in_file(
|
|
39 |
start_line_start_with: str,
|
40 |
end_line_start_with: str,
|
41 |
) -> bool:
|
42 |
-
with open(
|
43 |
-
file_path, 'r', encoding='utf-8'
|
44 |
-
) as f: # for windows compatibility
|
45 |
content = f.read()
|
46 |
|
47 |
-
with open(license_template_path,
|
48 |
new_license = f.read().strip()
|
49 |
|
50 |
maybe_existing_licenses = re.findall(
|
51 |
-
r
|
52 |
)
|
53 |
start_index = fine_license_start_line(
|
54 |
maybe_existing_licenses, start_line_start_with
|
55 |
)
|
56 |
-
end_index = find_license_end_line(
|
57 |
-
maybe_existing_licenses, end_line_start_with
|
58 |
-
)
|
59 |
if start_index is not None and end_index is not None:
|
60 |
-
maybe_existing_licenses = maybe_existing_licenses[
|
61 |
-
start_index : end_index + 1
|
62 |
-
]
|
63 |
else:
|
64 |
maybe_existing_licenses = None
|
65 |
if maybe_existing_licenses:
|
66 |
-
maybe_old_licenses =
|
67 |
if maybe_old_licenses.strip() != new_license.strip():
|
68 |
replaced_content = content.replace(maybe_old_licenses, new_license)
|
69 |
-
with open(file_path,
|
70 |
f.write(replaced_content)
|
71 |
-
print(f
|
72 |
return True
|
73 |
else:
|
74 |
return False
|
75 |
else:
|
76 |
-
with open(file_path,
|
77 |
-
f.write(new_license +
|
78 |
-
print(f
|
79 |
return True
|
80 |
|
81 |
|
@@ -87,16 +81,16 @@ def update_license_in_directory(
|
|
87 |
) -> None:
|
88 |
# Check if directory exists
|
89 |
if not os.path.isdir(directory_path):
|
90 |
-
raise NotADirectoryError(f
|
91 |
# Check if license template exists
|
92 |
if not os.path.isfile(license_template_path):
|
93 |
-
raise FileNotFoundError(f
|
94 |
|
95 |
file_count = 0
|
96 |
for py_files in Path(directory_path).rglob("*.py"):
|
97 |
-
if py_files.name.startswith(
|
98 |
continue
|
99 |
-
if any(part.startswith(
|
100 |
continue
|
101 |
if update_license_in_file(
|
102 |
py_files,
|
@@ -106,10 +100,10 @@ def update_license_in_directory(
|
|
106 |
):
|
107 |
file_count += 1
|
108 |
|
109 |
-
print(f
|
110 |
|
111 |
|
112 |
-
if __name__ ==
|
113 |
if len(sys.argv) < 3:
|
114 |
print(
|
115 |
"Usage from command line: "
|
|
|
39 |
start_line_start_with: str,
|
40 |
end_line_start_with: str,
|
41 |
) -> bool:
|
42 |
+
with open(file_path, "r", encoding="utf-8") as f: # for windows compatibility
|
|
|
|
|
43 |
content = f.read()
|
44 |
|
45 |
+
with open(license_template_path, "r", encoding="utf-8") as f:
|
46 |
new_license = f.read().strip()
|
47 |
|
48 |
maybe_existing_licenses = re.findall(
|
49 |
+
r"^#.*?(?=\n)", content, re.MULTILINE | re.DOTALL
|
50 |
)
|
51 |
start_index = fine_license_start_line(
|
52 |
maybe_existing_licenses, start_line_start_with
|
53 |
)
|
54 |
+
end_index = find_license_end_line(maybe_existing_licenses, end_line_start_with)
|
|
|
|
|
55 |
if start_index is not None and end_index is not None:
|
56 |
+
maybe_existing_licenses = maybe_existing_licenses[start_index : end_index + 1]
|
|
|
|
|
57 |
else:
|
58 |
maybe_existing_licenses = None
|
59 |
if maybe_existing_licenses:
|
60 |
+
maybe_old_licenses = "\n".join(maybe_existing_licenses)
|
61 |
if maybe_old_licenses.strip() != new_license.strip():
|
62 |
replaced_content = content.replace(maybe_old_licenses, new_license)
|
63 |
+
with open(file_path, "w") as f:
|
64 |
f.write(replaced_content)
|
65 |
+
print(f"Replaced license in {file_path}")
|
66 |
return True
|
67 |
else:
|
68 |
return False
|
69 |
else:
|
70 |
+
with open(file_path, "w") as f:
|
71 |
+
f.write(new_license + "\n" + content)
|
72 |
+
print(f"Added license to {file_path}")
|
73 |
return True
|
74 |
|
75 |
|
|
|
81 |
) -> None:
|
82 |
# Check if directory exists
|
83 |
if not os.path.isdir(directory_path):
|
84 |
+
raise NotADirectoryError(f"{directory_path} is not a directory")
|
85 |
# Check if license template exists
|
86 |
if not os.path.isfile(license_template_path):
|
87 |
+
raise FileNotFoundError(f"{license_template_path} not found")
|
88 |
|
89 |
file_count = 0
|
90 |
for py_files in Path(directory_path).rglob("*.py"):
|
91 |
+
if py_files.name.startswith("."):
|
92 |
continue
|
93 |
+
if any(part.startswith(".") for part in py_files.parts):
|
94 |
continue
|
95 |
if update_license_in_file(
|
96 |
py_files,
|
|
|
100 |
):
|
101 |
file_count += 1
|
102 |
|
103 |
+
print(f"License updated in {file_count} files")
|
104 |
|
105 |
|
106 |
+
if __name__ == "__main__":
|
107 |
if len(sys.argv) < 3:
|
108 |
print(
|
109 |
"Usage from command line: "
|
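Judging from the usage string above, the script takes a target directory and a license template as positional arguments; a hedged example invocation (both paths are illustrative):

```bash
python licenses/update_license.py owl/ licenses/license_template.txt
```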
owl/.env_template
CHANGED
@@ -1,8 +1,15 @@
|
|
1 |
-
# MODEL & API (See https://
|
2 |
|
3 |
# OPENAI API
|
4 |
-
OPENAI_API_KEY
|
5 |
-
# OPENAI_API_BASE_URL
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
|
8 |
# QWEN_API_KEY=""
|
@@ -26,3 +33,4 @@ CHUNKR_API_KEY=""
|
|
26 |
|
27 |
# Firecrawl API (https://www.firecrawl.dev/)
|
28 |
FIRECRAWL_API_KEY=""
|
|
|
|
1 |
+
# MODEL & API (See https://docs.camel-ai.org/key_modules/models.html#)
|
2 |
|
3 |
# OPENAI API
|
4 |
+
# OPENAI_API_KEY=""
|
5 |
+
# OPENAI_API_BASE_URL=""
|
6 |
+
|
7 |
+
# Azure OpenAI API
|
8 |
+
# AZURE_OPENAI_BASE_URL=""
|
9 |
+
# AZURE_API_VERSION=""
|
10 |
+
# AZURE_OPENAI_API_KEY=""
|
11 |
+
# AZURE_DEPLOYMENT_NAME=""
|
12 |
+
|
13 |
|
14 |
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
|
15 |
# QWEN_API_KEY=""
|
|
|
33 |
|
34 |
# Firecrawl API (https://www.firecrawl.dev/)
|
35 |
FIRECRAWL_API_KEY=""
|
36 |
+
# FIRECRAWL_API_URL="https://api.firecrawl.dev"
|
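These template variables are consumed at runtime through `dotenv`, as `owl/app.py` below does with `dotenv.load_dotenv()`; a minimal sketch of reading one of them:

```python
import os

import dotenv

dotenv.load_dotenv()  # load key=value pairs from .env into os.environ
api_key = os.environ.get("OPENAI_API_KEY", "")
```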
owl/app.py
ADDED
@@ -0,0 +1,921 @@
|
1 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
14 |
+
import os
|
15 |
+
import sys
|
16 |
+
import gradio as gr
|
17 |
+
import subprocess
|
18 |
+
import threading
|
19 |
+
import time
|
20 |
+
from datetime import datetime
|
21 |
+
import queue
|
22 |
+
from pathlib import Path
|
23 |
+
import json
|
24 |
+
import signal
|
25 |
+
import dotenv
|
26 |
+
|
27 |
+
# 设置日志队列
|
28 |
+
log_queue: queue.Queue[str] = queue.Queue()
|
29 |
+
|
30 |
+
# 当前运行的进程
|
31 |
+
current_process = None
|
32 |
+
process_lock = threading.Lock()
|
33 |
+
|
34 |
+
# 脚本选项
|
35 |
+
SCRIPTS = {
|
36 |
+
"Qwen Mini (中文)": "run_qwen_mini_zh.py",
|
37 |
+
"Qwen (中文)": "run_qwen_zh.py",
|
38 |
+
"Mini": "run_mini.py",
|
39 |
+
"DeepSeek (中文)": "run_deepseek_zh.py",
|
40 |
+
"Default": "run.py",
|
41 |
+
"GAIA Roleplaying": "run_gaia_roleplaying.py",
|
42 |
+
"OpenAI Compatible": "run_openai_compatiable_model.py",
|
43 |
+
"Azure OpenAI": "run_azure_openai.py",
|
44 |
+
"Ollama": "run_ollama.py",
|
45 |
+
"Terminal": "run_terminal_zh.py",
|
46 |
+
}
|
47 |
+
|
48 |
+
# 脚本描述
|
49 |
+
SCRIPT_DESCRIPTIONS = {
|
50 |
+
"Qwen Mini (中文)": "使用阿里云Qwen模型的中文版本,适合中文问答和任务",
|
51 |
+
"Qwen (中文)": "使用阿里云Qwen模型,支持多种工具和功能",
|
52 |
+
"Mini": "轻量级版本,使用OpenAI GPT-4o模型",
|
53 |
+
"DeepSeek (中文)": "使用DeepSeek模型,适合非多模态任务",
|
54 |
+
"Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具",
|
55 |
+
"GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
|
56 |
+
"OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点",
|
57 |
+
"Azure OpenAI": "使用Azure OpenAI API",
|
58 |
+
"Ollama": "使用Ollama API",
|
59 |
+
"Terminal": "使用本地终端执行python文件",
|
60 |
+
}
|
61 |
+
|
62 |
+
# 环境变量分组
|
63 |
+
ENV_GROUPS = {
|
64 |
+
"模型API": [
|
65 |
+
{
|
66 |
+
"name": "OPENAI_API_KEY",
|
67 |
+
"label": "OpenAI API密钥",
|
68 |
+
"type": "password",
|
69 |
+
"required": False,
|
70 |
+
"help": "OpenAI API密钥,用于访问GPT模型。获取方式:https://platform.openai.com/api-keys",
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"name": "OPENAI_API_BASE_URL",
|
74 |
+
"label": "OpenAI API基础URL",
|
75 |
+
"type": "text",
|
76 |
+
"required": False,
|
77 |
+
"help": "OpenAI API的基础URL,可选。如果使用代理或自定义端点,请设置此项。",
|
78 |
+
},
|
79 |
+
{
|
80 |
+
"name": "AZURE_OPENAI_KEY",
|
81 |
+
"label": "Azure OpenAI API密钥",
|
82 |
+
"type": "password",
|
83 |
+
"required": False,
|
84 |
+
"help": "Azure OpenAI API密钥,用于访问Azure部署的GPT模型",
|
85 |
+
},
|
86 |
+
{
|
87 |
+
"name": "AZURE_OPENAI_ENDPOINT",
|
88 |
+
"label": "Azure OpenAI端点",
|
89 |
+
"type": "text",
|
90 |
+
"required": False,
|
91 |
+
"help": "Azure OpenAI服务的端点URL",
|
92 |
+
},
|
93 |
+
{
|
94 |
+
"name": "AZURE_DEPLOYMENT_NAME",
|
95 |
+
"label": "Azure OpenAI部署名称",
|
96 |
+
"type": "text",
|
97 |
+
"required": False,
|
98 |
+
"help": "Azure OpenAI服务的部署名称",
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"name": "AZURE_OPENAI_VERSION",
|
102 |
+
"label": "Azure OpenAI API版本",
|
103 |
+
"type": "text",
|
104 |
+
"required": False,
|
105 |
+
"help": "Azure OpenAI API版本,例如:2023-12-01-preview",
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"name": "QWEN_API_KEY",
|
109 |
+
"label": "阿里云Qwen API密钥",
|
110 |
+
"type": "password",
|
111 |
+
"required": False,
|
112 |
+
"help": "阿里云Qwen API密钥,用于访问Qwen模型。获取方式:https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
|
113 |
+
},
|
114 |
+
{
|
115 |
+
"name": "DEEPSEEK_API_KEY",
|
116 |
+
"label": "DeepSeek API密钥",
|
117 |
+
"type": "password",
|
118 |
+
"required": False,
|
119 |
+
"help": "DeepSeek API密钥,用于访问DeepSeek模型。获取方式:https://platform.deepseek.com/api_keys",
|
120 |
+
},
|
121 |
+
],
|
122 |
+
"搜索工具": [
|
123 |
+
{
|
124 |
+
"name": "GOOGLE_API_KEY",
|
125 |
+
"label": "Google API密钥",
|
126 |
+
"type": "password",
|
127 |
+
"required": False,
|
128 |
+
"help": "Google搜索API密钥,用于网络搜索功能。获取方式:https://developers.google.com/custom-search/v1/overview",
|
129 |
+
},
|
130 |
+
{
|
131 |
+
"name": "SEARCH_ENGINE_ID",
|
132 |
+
"label": "搜索引擎ID",
|
133 |
+
"type": "text",
|
134 |
+
"required": False,
|
135 |
+
"help": "Google自定义搜索引擎ID,与Google API密钥配合使用。获取方式:https://developers.google.com/custom-search/v1/overview",
|
136 |
+
},
|
137 |
+
],
|
138 |
+
"其他工具": [
|
139 |
+
{
|
140 |
+
"name": "HF_TOKEN",
|
141 |
+
"label": "Hugging Face令牌",
|
142 |
+
"type": "password",
|
143 |
+
"required": False,
|
144 |
+
"help": "Hugging Face API令牌,用于访问Hugging Face模型和数据集。获取方式:https://huggingface.co/join",
|
145 |
+
},
|
146 |
+
{
|
147 |
+
"name": "CHUNKR_API_KEY",
|
148 |
+
"label": "Chunkr API密钥",
|
149 |
+
"type": "password",
|
150 |
+
"required": False,
|
151 |
+
"help": "Chunkr API密钥,用于文档处理功能。获取方式:https://chunkr.ai/",
|
152 |
+
},
|
153 |
+
{
|
154 |
+
"name": "FIRECRAWL_API_KEY",
|
155 |
+
"label": "Firecrawl API密钥",
|
156 |
+
"type": "password",
|
157 |
+
"required": False,
|
158 |
+
"help": "Firecrawl API密钥,用于网页爬取功能。获取方式:https://www.firecrawl.dev/",
|
159 |
+
},
|
160 |
+
],
|
161 |
+
"自定义环境变量": [], # 用户自定义的环境变量将存储在这里
|
162 |
+
}
|
163 |
+
|
164 |
+
|
165 |
+
def get_script_info(script_name):
|
166 |
+
"""获取脚本的详细信息"""
|
167 |
+
return SCRIPT_DESCRIPTIONS.get(script_name, "无描述信息")
|
168 |
+
|
169 |
+
|
170 |
+
def load_env_vars():
|
171 |
+
"""加载环境变量"""
|
172 |
+
env_vars = {}
|
173 |
+
# 尝试从.env文件加载
|
174 |
+
dotenv.load_dotenv()
|
175 |
+
|
176 |
+
# 获取所有环境变量
|
177 |
+
for group in ENV_GROUPS.values():
|
178 |
+
for var in group:
|
179 |
+
env_vars[var["name"]] = os.environ.get(var["name"], "")
|
180 |
+
|
181 |
+
# 加载.env文件中可能存在的其他环境变量
|
182 |
+
if Path(".env").exists():
|
183 |
+
try:
|
184 |
+
with open(".env", "r", encoding="utf-8") as f:
|
185 |
+
for line in f:
|
186 |
+
line = line.strip()
|
187 |
+
if line and not line.startswith("#") and "=" in line:
|
188 |
+
try:
|
189 |
+
key, value = line.split("=", 1)
|
190 |
+
key = key.strip()
|
191 |
+
value = value.strip()
|
192 |
+
|
193 |
+
# 处理引号包裹的值
|
194 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
195 |
+
value.startswith("'") and value.endswith("'")
|
196 |
+
):
|
197 |
+
value = value[1:-1] # 移除首尾的引号
|
198 |
+
|
199 |
+
# 检查是否是已知的环境变量
|
200 |
+
known_var = False
|
201 |
+
for group in ENV_GROUPS.values():
|
202 |
+
if any(var["name"] == key for var in group):
|
203 |
+
known_var = True
|
204 |
+
break
|
205 |
+
|
206 |
+
# 如果不是已知的环境变量,添加到自定义环境变量组
|
207 |
+
if not known_var and key not in env_vars:
|
208 |
+
ENV_GROUPS["自定义环境变量"].append(
|
209 |
+
{
|
210 |
+
"name": key,
|
211 |
+
"label": key,
|
212 |
+
"type": "text",
|
213 |
+
"required": False,
|
214 |
+
"help": "用户自定义环境变量",
|
215 |
+
}
|
216 |
+
)
|
217 |
+
env_vars[key] = value
|
218 |
+
except Exception as e:
|
219 |
+
print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
|
220 |
+
except Exception as e:
|
221 |
+
print(f"加载.env文件时出错: {str(e)}")
|
222 |
+
|
223 |
+
return env_vars
|
224 |
+
|
225 |
+
|
226 |
+
def save_env_vars(env_vars):
|
227 |
+
"""保存环境变量到.env文件"""
|
228 |
+
# 读取现有的.env文件内容
|
229 |
+
env_path = Path(".env")
|
230 |
+
existing_content = {}
|
231 |
+
|
232 |
+
if env_path.exists():
|
233 |
+
try:
|
234 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
235 |
+
for line in f:
|
236 |
+
line = line.strip()
|
237 |
+
if line and not line.startswith("#") and "=" in line:
|
238 |
+
try:
|
239 |
+
key, value = line.split("=", 1)
|
240 |
+
existing_content[key.strip()] = value.strip()
|
241 |
+
except Exception as e:
|
242 |
+
print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
|
243 |
+
except Exception as e:
|
244 |
+
print(f"读取.env文件时出错: {str(e)}")
|
245 |
+
|
246 |
+
# 更新环境变量
|
247 |
+
for key, value in env_vars.items():
|
248 |
+
if value is not None: # 允许空字符串值,但不允许None
|
249 |
+
# 确保值是字符串形式
|
250 |
+
value = str(value) # 确保值是字符串
|
251 |
+
|
252 |
+
# 检查值是否已经被引号包裹
|
253 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
254 |
+
value.startswith("'") and value.endswith("'")
|
255 |
+
):
|
256 |
+
# 已经被引号包裹,保持原样
|
257 |
+
existing_content[key] = value
|
258 |
+
# 更新环境变量时移除引号
|
259 |
+
os.environ[key] = value[1:-1]
|
260 |
+
else:
|
261 |
+
# 没有被引号包裹,添加双引号
|
262 |
+
# 用双引号包裹值,确保特殊字符被正确处理
|
263 |
+
quoted_value = f'"{value}"'
|
264 |
+
existing_content[key] = quoted_value
|
265 |
+
# 同时更新当前进程的环境变量(使用未引用的值)
|
266 |
+
os.environ[key] = value
|
267 |
+
|
268 |
+
# 写入.env文件
|
269 |
+
try:
|
270 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
271 |
+
for key, value in existing_content.items():
|
272 |
+
f.write(f"{key}={value}\n")
|
273 |
+
except Exception as e:
|
274 |
+
print(f"写入.env文件时出错: {str(e)}")
|
275 |
+
return f"❌ 保存环境变量失败: {str(e)}"
|
276 |
+
|
277 |
+
return "✅ 环境变量已保存"
|
278 |
+
|
279 |
+
|
280 |
+
def add_custom_env_var(name, value, var_type):
|
281 |
+
"""添加自定义环境变量"""
|
282 |
+
if not name:
|
283 |
+
return "❌ 环境变量名不能为空", None
|
284 |
+
|
285 |
+
# 检查是否已存在同名环境变量
|
286 |
+
for group in ENV_GROUPS.values():
|
287 |
+
if any(var["name"] == name for var in group):
|
288 |
+
return f"❌ 环境变量 {name} 已存在", None
|
289 |
+
|
290 |
+
# 添加到自定义环境变量组
|
291 |
+
ENV_GROUPS["自定义环境变量"].append(
|
292 |
+
{
|
293 |
+
"name": name,
|
294 |
+
"label": name,
|
295 |
+
"type": var_type,
|
296 |
+
"required": False,
|
297 |
+
"help": "用户自定义环境变量",
|
298 |
+
}
|
299 |
+
)
|
300 |
+
|
301 |
+
# 保存环境变量
|
302 |
+
env_vars = {name: value}
|
303 |
+
save_env_vars(env_vars)
|
304 |
+
|
305 |
+
# 返回成功消息和更新后的环境变量组
|
306 |
+
return f"✅ 已添加环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
307 |
+
|
308 |
+
|
309 |
+
def update_custom_env_var(name, value, var_type):
|
310 |
+
"""更改自定义环境变量"""
|
311 |
+
if not name:
|
312 |
+
return "❌ 环境变量名不能为空", None
|
313 |
+
|
314 |
+
# 检查环境变量是否存在于自定义环境变量组中
|
315 |
+
found = False
|
316 |
+
for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
|
317 |
+
if var["name"] == name:
|
318 |
+
# 更新类型
|
319 |
+
ENV_GROUPS["自定义环境变量"][i]["type"] = var_type
|
320 |
+
found = True
|
321 |
+
break
|
322 |
+
|
323 |
+
if not found:
|
324 |
+
return f"❌ 自定义环境变量 {name} 不存在", None
|
325 |
+
|
326 |
+
# 保存环境变量值
|
327 |
+
env_vars = {name: value}
|
328 |
+
save_env_vars(env_vars)
|
329 |
+
|
330 |
+
# 返回成功消息和更新后的环境变量组
|
331 |
+
return f"✅ 已更新环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
332 |
+
|
333 |
+
|
334 |
+
def delete_custom_env_var(name):
|
335 |
+
"""删除自定义环境变量"""
|
336 |
+
if not name:
|
337 |
+
return "❌ 环境变量名不能为空", None
|
338 |
+
|
339 |
+
# 检查环境变量是否存在于自定义环境变量组中
|
340 |
+
found = False
|
341 |
+
for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
|
342 |
+
if var["name"] == name:
|
343 |
+
# 从自定义环境变量组中删除
|
344 |
+
del ENV_GROUPS["自定义环境变量"][i]
|
345 |
+
found = True
|
346 |
+
break
|
347 |
+
|
348 |
+
if not found:
|
349 |
+
return f"❌ 自定义环境变量 {name} 不存在", None
|
350 |
+
|
351 |
+
# 从.env文件中删除该环境变量
|
352 |
+
env_path = Path(".env")
|
353 |
+
if env_path.exists():
|
354 |
+
try:
|
355 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
356 |
+
lines = f.readlines()
|
357 |
+
|
358 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
359 |
+
for line in lines:
|
360 |
+
try:
|
361 |
+
# 更精确地匹配环境变量行
|
362 |
+
line_stripped = line.strip()
|
363 |
+
# 检查是否为注释行或空行
|
364 |
+
if not line_stripped or line_stripped.startswith("#"):
|
365 |
+
f.write(line) # 保留注释行和空行
|
366 |
+
continue
|
367 |
+
|
368 |
+
# 检查是否包含等号
|
369 |
+
if "=" not in line_stripped:
|
370 |
+
f.write(line) # 保留不包含等号的行
|
371 |
+
continue
|
372 |
+
|
373 |
+
# 提取变量名并检查是否与要删除的变量匹配
|
374 |
+
var_name = line_stripped.split("=", 1)[0].strip()
|
375 |
+
if var_name != name:
|
376 |
+
f.write(line) # 保留不匹配的变量
|
377 |
+
except Exception as e:
|
378 |
+
print(f"处理.env文件行时出错: {line}, 错误: {str(e)}")
|
379 |
+
# 出错时保留原行
|
380 |
+
f.write(line)
|
381 |
+
except Exception as e:
|
382 |
+
print(f"删除环境变量时出错: {str(e)}")
|
383 |
+
return f"❌ 删除环境变量失败: {str(e)}", None
|
384 |
+
|
385 |
+
# 从当前进程的环境变量中删除
|
386 |
+
if name in os.environ:
|
387 |
+
del os.environ[name]
|
388 |
+
|
389 |
+
# 返回成功消息和更新后的环境变量组
|
390 |
+
return f"✅ 已删除环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
391 |
+
|
392 |
+
|
393 |
+
def terminate_process():
|
394 |
+
"""终止当前运行的进程"""
|
395 |
+
global current_process
|
396 |
+
|
397 |
+
with process_lock:
|
398 |
+
if current_process is not None and current_process.poll() is None:
|
399 |
+
try:
|
400 |
+
# 在Windows上使用taskkill强制终止进程树
|
401 |
+
if os.name == "nt":
|
402 |
+
# 获取进程ID
|
403 |
+
pid = current_process.pid
|
404 |
+
# 使用taskkill命令终止进程及其子进程 - 避免使用shell=True以提高安全性
|
405 |
+
try:
|
406 |
+
subprocess.run(
|
407 |
+
["taskkill", "/F", "/T", "/PID", str(pid)], check=False
|
408 |
+
)
|
409 |
+
except subprocess.SubprocessError as e:
|
410 |
+
log_queue.put(f"终止进程时出错: {str(e)}\n")
|
411 |
+
return f"❌ 终止进程时出错: {str(e)}"
|
412 |
+
else:
|
413 |
+
# 在Unix上使用SIGTERM和SIGKILL
|
414 |
+
current_process.terminate()
|
415 |
+
try:
|
416 |
+
current_process.wait(timeout=3)
|
417 |
+
except subprocess.TimeoutExpired:
|
418 |
+
current_process.kill()
|
419 |
+
|
420 |
+
# 等待进程终止
|
421 |
+
try:
|
422 |
+
current_process.wait(timeout=2)
|
423 |
+
except subprocess.TimeoutExpired:
|
424 |
+
pass # 已经尝试强制终止,忽略超时
|
425 |
+
|
426 |
+
log_queue.put("进程已终止\n")
|
427 |
+
return "✅ 进程已终止"
|
428 |
+
except Exception as e:
|
429 |
+
log_queue.put(f"终止进程时出错: {str(e)}\n")
|
430 |
+
return f"❌ 终止进程时出错: {str(e)}"
|
431 |
+
else:
|
432 |
+
return "❌ 没有正在运行的进程"
|
433 |
+
|
434 |
+
|
435 |
+
def run_script(script_dropdown, question, progress=gr.Progress()):
|
436 |
+
"""运行选定的脚本并返回输出"""
|
437 |
+
global current_process
|
438 |
+
|
439 |
+
script_name = SCRIPTS.get(script_dropdown)
|
440 |
+
if not script_name:
|
441 |
+
return "❌ 无效的脚本选择", "", "", "", None
|
442 |
+
|
443 |
+
if not question.strip():
|
444 |
+
return "请输入问题!", "", "", "", None
|
445 |
+
|
446 |
+
# 清空日志队列
|
447 |
+
while not log_queue.empty():
|
448 |
+
log_queue.get()
|
449 |
+
|
450 |
+
# 创建日志目录
|
451 |
+
log_dir = Path("logs")
|
452 |
+
log_dir.mkdir(exist_ok=True)
|
453 |
+
|
454 |
+
# 创建带时间戳的日志文件
|
455 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
456 |
+
log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
|
457 |
+
|
458 |
+
# 构建命令
|
459 |
+
# 获取当前脚本所在的基础路径
|
460 |
+
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
461 |
+
|
462 |
+
cmd = [
|
463 |
+
sys.executable,
|
464 |
+
os.path.join(base_path, "owl", "script_adapter.py"),
|
465 |
+
os.path.join(base_path, "owl", script_name),
|
466 |
+
]
|
467 |
+
|
468 |
+
# 创建环境变量副本并添加问题
|
469 |
+
env = os.environ.copy()
|
470 |
+
# 确保问题是字符串类型
|
471 |
+
if not isinstance(question, str):
|
472 |
+
question = str(question)
|
473 |
+
# 保留换行符,但确保是有效的字符串
|
474 |
+
env["OWL_QUESTION"] = question
|
475 |
+
|
476 |
+
# 启动进程
|
477 |
+
with process_lock:
|
478 |
+
current_process = subprocess.Popen(
|
479 |
+
cmd,
|
480 |
+
stdout=subprocess.PIPE,
|
481 |
+
stderr=subprocess.STDOUT,
|
482 |
+
text=True,
|
483 |
+
bufsize=1,
|
484 |
+
env=env,
|
485 |
+
encoding="utf-8",
|
486 |
+
)
|
487 |
+
|
488 |
+
# 创建线程来读取输出
|
489 |
+
def read_output():
|
490 |
+
try:
|
491 |
+
# 使用唯一的时间戳确保日志文件名不重复
|
492 |
+
timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
|
493 |
+
unique_log_file = (
|
494 |
+
log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
|
495 |
+
)
|
496 |
+
|
497 |
+
# 使用这个唯一的文件名写入日志
|
498 |
+
with open(unique_log_file, "w", encoding="utf-8") as f:
|
499 |
+
# 更新全局日志文件路径
|
500 |
+
nonlocal log_file
|
501 |
+
log_file = unique_log_file
|
502 |
+
|
503 |
+
for line in iter(current_process.stdout.readline, ""):
|
504 |
+
if line:
|
505 |
+
# 写入日志文件
|
506 |
+
f.write(line)
|
507 |
+
f.flush()
|
508 |
+
# 添加到队列
|
509 |
+
log_queue.put(line)
|
510 |
+
except Exception as e:
|
511 |
+
log_queue.put(f"读取输出时出错: {str(e)}\n")
|
512 |
+
|
513 |
+
# 启动读取线程
|
514 |
+
threading.Thread(target=read_output, daemon=True).start()
|
515 |
+
|
516 |
+
# 收集日志
|
517 |
+
logs = []
|
518 |
+
progress(0, desc="正在运行...")
|
519 |
+
|
520 |
+
# 等待进程完成或超时
|
521 |
+
start_time = time.time()
|
522 |
+
timeout = 1800 # 30分钟超时
|
523 |
+
|
524 |
+
while current_process.poll() is None:
|
525 |
+
# 检查是否超时
|
526 |
+
if time.time() - start_time > timeout:
|
527 |
+
with process_lock:
|
528 |
+
if current_process.poll() is None:
|
529 |
+
if os.name == "nt":
|
530 |
+
current_process.send_signal(signal.CTRL_BREAK_EVENT)
|
531 |
+
else:
|
532 |
+
current_process.terminate()
|
533 |
+
log_queue.put("执行超时,已终止进程\n")
|
534 |
+
break
|
535 |
+
|
536 |
+
# 从队列获取日志
|
537 |
+
while not log_queue.empty():
|
538 |
+
log = log_queue.get()
|
539 |
+
logs.append(log)
|
540 |
+
|
541 |
+
# 更新进度
|
542 |
+
elapsed = time.time() - start_time
|
543 |
+
progress(min(elapsed / 300, 0.99), desc="正在运行...")
|
544 |
+
|
545 |
+
# 短暂休眠以减少CPU使用
|
546 |
+
time.sleep(0.1)
|
547 |
+
|
548 |
+
# 每秒更新一次日志显示
|
549 |
+
yield (
|
550 |
+
status_message(current_process),
|
551 |
+
extract_answer(logs),
|
552 |
+
"".join(logs),
|
553 |
+
str(log_file),
|
554 |
+
None,
|
555 |
+
)
|
556 |
+
|
557 |
+
# 获取剩余日志
|
558 |
+
while not log_queue.empty():
|
559 |
+
logs.append(log_queue.get())
|
560 |
+
|
561 |
+
# 提取聊天历史(如果有)
|
562 |
+
chat_history = extract_chat_history(logs)
|
563 |
+
|
564 |
+
# 返回最终状态和日志
|
565 |
+
return (
|
566 |
+
status_message(current_process),
|
567 |
+
extract_answer(logs),
|
568 |
+
"".join(logs),
|
569 |
+
str(log_file),
|
570 |
+
chat_history,
|
571 |
+
)
|
572 |
+
|
573 |
+
|
574 |
+
def status_message(process):
|
575 |
+
"""根据进程状态返回状态消息"""
|
576 |
+
if process.poll() is None:
|
577 |
+
return "⏳ 正在运行..."
|
578 |
+
elif process.returncode == 0:
|
579 |
+
return "✅ 执行成功"
|
580 |
+
else:
|
581 |
+
return f"❌ 执行失败 (返回码: {process.returncode})"
|
582 |
+
|
583 |
+
|
584 |
+
def extract_answer(logs):
|
585 |
+
"""从日志中提取答案"""
|
586 |
+
answer = ""
|
587 |
+
for log in logs:
|
588 |
+
if "Answer:" in log:
|
589 |
+
answer = log.split("Answer:", 1)[1].strip()
|
590 |
+
break
|
591 |
+
return answer
|
592 |
+
|
593 |
+
|
594 |
+
def extract_chat_history(logs):
|
595 |
+
"""尝试从日志中提取聊天历史"""
|
596 |
+
try:
|
597 |
+
chat_json_str = ""
|
598 |
+
capture_json = False
|
599 |
+
|
600 |
+
for log in logs:
|
601 |
+
if "chat_history" in log:
|
602 |
+
# 开始捕获JSON
|
603 |
+
start_idx = log.find("[")
|
604 |
+
if start_idx != -1:
|
605 |
+
capture_json = True
|
606 |
+
chat_json_str = log[start_idx:]
|
607 |
+
elif capture_json:
|
608 |
+
# 继续捕获JSON直到找到匹配的结束括号
|
609 |
+
chat_json_str += log
|
610 |
+
if "]" in log:
|
611 |
+
# 找到结束括号,尝试解析JSON
|
612 |
+
end_idx = chat_json_str.rfind("]") + 1
|
613 |
+
if end_idx > 0:
|
614 |
+
try:
|
615 |
+
# 清理可能的额外文本
|
616 |
+
json_str = chat_json_str[:end_idx].strip()
|
617 |
+
chat_data = json.loads(json_str)
|
618 |
+
|
619 |
+
# 格式化为Gradio聊天组件可用的格式
|
620 |
+
formatted_chat = []
|
621 |
+
for msg in chat_data:
|
622 |
+
if "role" in msg and "content" in msg:
|
623 |
+
role = "用户" if msg["role"] == "user" else "助手"
|
624 |
+
formatted_chat.append([role, msg["content"]])
|
625 |
+
return formatted_chat
|
626 |
+
except json.JSONDecodeError:
|
627 |
+
# 如果解析失败,继续捕获
|
628 |
+
pass
|
629 |
+
except Exception:
|
630 |
+
# 其他错误,停止捕获
|
631 |
+
capture_json = False
|
632 |
+
except Exception:
|
633 |
+
pass
|
634 |
+
return None
|
635 |
+
|
636 |
+
|
637 |
+
def create_ui():
|
638 |
+
"""创建Gradio界面"""
|
639 |
+
# 加载环境变量
|
640 |
+
env_vars = load_env_vars()
|
641 |
+
|
642 |
+
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
|
643 |
+
gr.Markdown(
|
644 |
+
"""
|
645 |
+
# 🦉 OWL 智能助手运行平台
|
646 |
+
|
647 |
+
选择一个模型并输入您的问题,系统将运行相应的脚本并显示结果。
|
648 |
+
"""
|
649 |
+
)
|
650 |
+
|
651 |
+
with gr.Tabs():
|
652 |
+
with gr.TabItem("运行模式"):
|
653 |
+
with gr.Row():
|
654 |
+
with gr.Column(scale=1):
|
655 |
+
# 确保默认值是SCRIPTS中存在的键
|
656 |
+
default_script = list(SCRIPTS.keys())[0] if SCRIPTS else None
|
657 |
+
script_dropdown = gr.Dropdown(
|
658 |
+
choices=list(SCRIPTS.keys()),
|
659 |
+
value=default_script,
|
660 |
+
label="选择模式",
|
661 |
+
)
|
662 |
+
|
663 |
+
script_info = gr.Textbox(
|
664 |
+
value=get_script_info(default_script)
|
665 |
+
if default_script
|
666 |
+
else "",
|
667 |
+
label="模型描述",
|
668 |
+
interactive=False,
|
669 |
+
)
|
670 |
+
|
671 |
+
script_dropdown.change(
|
672 |
+
fn=lambda x: get_script_info(x),
|
673 |
+
inputs=script_dropdown,
|
674 |
+
outputs=script_info,
|
675 |
+
)
|
676 |
+
|
677 |
+
question_input = gr.Textbox(
|
678 |
+
lines=8,
|
679 |
+
placeholder="请输入您的问题...",
|
680 |
+
label="问题",
|
681 |
+
elem_id="question_input",
|
682 |
+
show_copy_button=True,
|
683 |
+
)
|
684 |
+
|
685 |
+
gr.Markdown(
|
686 |
+
"""
|
687 |
+
> **注意**: 您输入的问题将替换脚本中的默认问题。系统会自动处理问题的替换,确保您的问题被正确使用。
|
688 |
+
> 支持多行输入,换行将被保留。
|
689 |
+
"""
|
690 |
+
)
|
691 |
+
|
692 |
+
with gr.Row():
|
693 |
+
run_button = gr.Button("运行", variant="primary")
|
694 |
+
stop_button = gr.Button("终止", variant="stop")
|
695 |
+
|
696 |
+
with gr.Column(scale=2):
|
697 |
+
with gr.Tabs():
|
698 |
+
with gr.TabItem("结果"):
|
699 |
+
status_output = gr.Textbox(label="状态")
|
700 |
+
answer_output = gr.Textbox(label="回答", lines=10)
|
701 |
+
log_file_output = gr.Textbox(label="日志文件路径")
|
702 |
+
|
703 |
+
with gr.TabItem("运行日志"):
|
704 |
+
log_output = gr.Textbox(label="完整日志", lines=25)
|
705 |
+
|
706 |
+
with gr.TabItem("聊天历史"):
|
707 |
+
chat_output = gr.Chatbot(label="对话历史")
|
708 |
+
|
709 |
+
# 示例问题
|
710 |
+
examples = [
|
711 |
+
[
|
712 |
+
"Qwen Mini (中文)",
|
713 |
+
"浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
|
714 |
+
],
|
715 |
+
[
|
716 |
+
"DeepSeek (中文)",
|
717 |
+
"请分析GitHub上CAMEL-AI项目的最新统计数据。找出该项目的星标数量、贡献者数量和最近的活跃度。然后,创建一个简单的Excel表格来展示这些数据,并生成一个柱状图来可视化这些指标。最后,总结CAMEL项目的受欢迎程度和发展趋势。",
|
718 |
+
],
|
719 |
+
[
|
720 |
+
"Default",
|
721 |
+
"Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer.",
|
722 |
+
],
|
723 |
+
]
|
724 |
+
|
725 |
+
gr.Examples(examples=examples, inputs=[script_dropdown, question_input])
|
726 |
+
|
727 |
+
with gr.TabItem("环境变量配置"):
|
728 |
+
env_inputs = {}
|
729 |
+
save_status = gr.Textbox(label="保存状态", interactive=False)
|
730 |
+
|
731 |
+
# 添加自定义环境变量部分
|
732 |
+
with gr.Accordion("添加自定义环境变量", open=True):
|
733 |
+
with gr.Row():
|
734 |
+
new_var_name = gr.Textbox(
|
735 |
+
label="环境变量名", placeholder="例如:MY_CUSTOM_API_KEY"
|
736 |
+
)
|
737 |
+
new_var_value = gr.Textbox(
|
738 |
+
label="环境变量值", placeholder="输入值"
|
739 |
+
)
|
740 |
+
new_var_type = gr.Dropdown(
|
741 |
+
choices=["text", "password"], value="text", label="类型"
|
742 |
+
)
|
743 |
+
|
744 |
+
add_var_button = gr.Button("添加环境变量", variant="primary")
|
745 |
+
add_var_status = gr.Textbox(label="添加状态", interactive=False)
|
746 |
+
|
747 |
+
# 自定义环境变量列表
|
748 |
+
custom_vars_list = gr.JSON(
|
749 |
+
value=ENV_GROUPS["自定义环境变量"],
|
750 |
+
label="已添加的自定义环境变量",
|
751 |
+
visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
|
752 |
+
)
|
753 |
+
|
754 |
+
# 更改和删除自定义环境变量部分
|
755 |
+
with gr.Accordion(
|
756 |
+
"更改或删除自定义环境变量",
|
757 |
+
open=True,
|
758 |
+
visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
|
759 |
+
) as update_delete_accordion:
|
760 |
+
with gr.Row():
|
761 |
+
# 创建下拉菜单,显示所有自定义环境变量
|
762 |
+
custom_var_dropdown = gr.Dropdown(
|
763 |
+
choices=[
|
764 |
+
var["name"] for var in ENV_GROUPS["自定义环境变量"]
|
765 |
+
],
|
766 |
+
label="选择环境变量",
|
767 |
+
interactive=True,
|
768 |
+
)
|
769 |
+
update_var_value = gr.Textbox(
|
770 |
+
label="新的环境变量值", placeholder="输入新值"
|
771 |
+
)
|
772 |
+
update_var_type = gr.Dropdown(
|
773 |
+
choices=["text", "password"], value="text", label="类型"
|
774 |
+
)
|
775 |
+
|
776 |
+
with gr.Row():
|
777 |
+
update_var_button = gr.Button("更新环境变量", variant="primary")
|
778 |
+
delete_var_button = gr.Button("删除环境变量", variant="stop")
|
779 |
+
|
780 |
+
update_var_status = gr.Textbox(label="操作状态", interactive=False)
|
781 |
+
|
782 |
+
# 添加环境变量按钮点击事件
|
783 |
+
add_var_button.click(
|
784 |
+
fn=add_custom_env_var,
|
785 |
+
inputs=[new_var_name, new_var_value, new_var_type],
|
786 |
+
outputs=[add_var_status, custom_vars_list],
|
787 |
+
).then(
|
788 |
+
fn=lambda vars: {"visible": len(vars) > 0},
|
789 |
+
inputs=[custom_vars_list],
|
790 |
+
outputs=[update_delete_accordion],
|
791 |
+
)
|
792 |
+
|
793 |
+
# 更新环境变量按钮点击事件
|
794 |
+
update_var_button.click(
|
795 |
+
fn=update_custom_env_var,
|
796 |
+
inputs=[custom_var_dropdown, update_var_value, update_var_type],
|
797 |
+
outputs=[update_var_status, custom_vars_list],
|
798 |
+
)
|
799 |
+
|
800 |
+
# 删除环境变量按钮点击事件
|
801 |
+
delete_var_button.click(
|
802 |
+
fn=delete_custom_env_var,
|
803 |
+
inputs=[custom_var_dropdown],
|
804 |
+
outputs=[update_var_status, custom_vars_list],
|
805 |
+
).then(
|
806 |
+
fn=lambda vars: {"visible": len(vars) > 0},
|
807 |
+
inputs=[custom_vars_list],
|
808 |
+
outputs=[update_delete_accordion],
|
809 |
+
)
|
810 |
+
|
811 |
+
# 当自定义环境变量列表更新时,更新下拉菜单选项
|
812 |
+
custom_vars_list.change(
|
813 |
+
fn=lambda vars: gr.update(
|
814 |
+
"choices": [var["name"] for var in vars],
|
815 |
+
"value": None,
|
816 |
+
),
|
817 |
+
inputs=[custom_vars_list],
|
818 |
+
outputs=[custom_var_dropdown],
|
819 |
+
)
|
820 |
+
|
821 |
+
# 现有环境变量配置
|
822 |
+
for group_name, vars in ENV_GROUPS.items():
|
823 |
+
if (
|
824 |
+
group_name != "自定义环境变量" or len(vars) > 0
|
825 |
+
): # 只显示非空的自定义环境变量组
|
826 |
+
with gr.Accordion(
|
827 |
+
group_name, open=(group_name != "自定义环境变量")
|
828 |
+
):
|
829 |
+
for var in vars:
|
830 |
+
# 添加帮助信息
|
831 |
+
gr.Markdown(f"**{var['help']}**")
|
832 |
+
|
833 |
+
if var["type"] == "password":
|
834 |
+
env_inputs[var["name"]] = gr.Textbox(
|
835 |
+
value=env_vars.get(var["name"], ""),
|
836 |
+
label=var["label"],
|
837 |
+
placeholder=f"请输入{var['label']}",
|
838 |
+
type="password",
|
839 |
+
)
|
840 |
+
else:
|
841 |
+
env_inputs[var["name"]] = gr.Textbox(
|
842 |
+
value=env_vars.get(var["name"], ""),
|
843 |
+
label=var["label"],
|
844 |
+
placeholder=f"请输入{var['label']}",
|
845 |
+
)
|
846 |
+
|
847 |
+
save_button = gr.Button("保存环境变量", variant="primary")
|
848 |
+
|
849 |
+
# 保存环境变量
|
850 |
+
save_inputs = [
|
851 |
+
env_inputs[var_name]
|
852 |
+
for group in ENV_GROUPS.values()
|
853 |
+
for var in group
|
854 |
+
for var_name in [var["name"]]
|
855 |
+
if var_name in env_inputs
|
856 |
+
]
|
857 |
+
save_button.click(
|
858 |
+
fn=lambda *values: save_env_vars(
|
859 |
+
dict(
|
860 |
+
zip(
|
861 |
+
[
|
862 |
+
var["name"]
|
863 |
+
for group in ENV_GROUPS.values()
|
864 |
+
for var in group
|
865 |
+
if var["name"] in env_inputs
|
866 |
+
],
|
867 |
+
values,
|
868 |
+
)
|
869 |
+
)
|
870 |
+
),
|
871 |
+
inputs=save_inputs,
|
872 |
+
outputs=save_status,
|
873 |
+
)
|
874 |
+
|
875 |
+
# 运行脚本
|
876 |
+
run_button.click(
|
877 |
+
fn=run_script,
|
878 |
+
inputs=[script_dropdown, question_input],
|
879 |
+
outputs=[
|
880 |
+
status_output,
|
881 |
+
answer_output,
|
882 |
+
log_output,
|
883 |
+
log_file_output,
|
884 |
+
chat_output,
|
885 |
+
],
|
886 |
+
show_progress=True,
|
887 |
+
)
|
888 |
+
|
889 |
+
# 终止运行
|
890 |
+
stop_button.click(fn=terminate_process, inputs=[], outputs=[status_output])
|
891 |
+
|
892 |
+
# 添加页脚
|
893 |
+
gr.Markdown(
|
894 |
+
"""
|
895 |
+
### 📝 使用说明
|
896 |
+
|
897 |
+
- 选择一个模型并输入您的问题
|
898 |
+
- 点击"运行"按钮开始执行
|
899 |
+
- 如需终止运行,点击"终止"按钮
|
900 |
+
- 在"结果"标签页查看执行状态和回答
|
901 |
+
- 在"运行日志"标签页查看完整日志
|
902 |
+
- 在"聊天历史"标签页查看对话历史(如果有)
|
903 |
+
- 在"环境变量配置"标签页配置API密钥和其他环境变量
|
904 |
+
- 您可以添加自定义环境变量,满足特殊需求
|
905 |
+
|
906 |
+
### ⚠️ 注意事项
|
907 |
+
|
908 |
+
- 运行某些模型可能需要API密钥,请确保在"环境变量配置"标签页中设置了相应的环境变量
|
909 |
+
- 某些脚本可能需要较长时间运行,请耐心等待
|
910 |
+
- 如果运行超过30分钟,进程将自动终止
|
911 |
+
- 您输入的问题将替换脚本中的默认问题,确保问题与所选模型兼容
|
912 |
+
"""
|
913 |
+
)
|
914 |
+
|
915 |
+
return app
|
916 |
+
|
917 |
+
|
918 |
+
if __name__ == "__main__":
|
919 |
+
# 创建并启动应用
|
920 |
+
app = create_ui()
|
921 |
+
app.queue().launch(share=True)
|
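The pattern worth noting in `app.py` is how subprocess output is streamed without blocking the UI: a daemon thread drains the child's stdout into a `queue.Queue`, and the polling loop empties the queue on each tick. A stripped-down sketch of that pattern in isolation:

```python
import queue
import subprocess
import sys
import threading

log_queue: "queue.Queue[str]" = queue.Queue()

# Spawn a child process whose stdout we want to stream live.
proc = subprocess.Popen(
    [sys.executable, "-c", "print('hello'); print('world')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    text=True,
    bufsize=1,
)

def read_output() -> None:
    # iter() yields lines until readline() returns "" at EOF.
    for line in iter(proc.stdout.readline, ""):
        log_queue.put(line)

threading.Thread(target=read_output, daemon=True).start()

proc.wait()
while not log_queue.empty():
    print(log_queue.get(), end="")
```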
owl/app_en.py
ADDED
@@ -0,0 +1,948 @@
|
1 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
14 |
+
import os
|
15 |
+
import sys
|
16 |
+
import gradio as gr
|
17 |
+
import subprocess
|
18 |
+
import threading
|
19 |
+
import time
|
20 |
+
from datetime import datetime
|
21 |
+
import queue
|
22 |
+
from pathlib import Path
|
23 |
+
import json
|
24 |
+
import signal
|
25 |
+
import dotenv
|
26 |
+
|
27 |
+
# Set up log queue
|
28 |
+
log_queue: queue.Queue[str] = queue.Queue()
|
29 |
+
|
30 |
+
# Currently running process
|
31 |
+
current_process = None
|
32 |
+
process_lock = threading.Lock()
|
33 |
+
|
34 |
+
# Script options
|
35 |
+
SCRIPTS = {
|
36 |
+
"Qwen Mini (Chinese)": "run_qwen_mini_zh.py",
|
37 |
+
"Qwen (Chinese)": "run_qwen_zh.py",
|
38 |
+
"Mini": "run_mini.py",
|
39 |
+
"DeepSeek (Chinese)": "run_deepseek_zh.py",
|
40 |
+
"Default": "run.py",
|
41 |
+
"GAIA Roleplaying": "run_gaia_roleplaying.py",
|
42 |
+
"OpenAI Compatible": "run_openai_compatiable_model.py",
|
43 |
+
"Azure OpenAI": "run_azure_openai.py",
|
44 |
+
"Ollama": "run_ollama.py",
|
45 |
+
"Terminal": "run_terminal.py",
|
46 |
+
}
|
47 |
+
|
48 |
+
# Script descriptions
|
49 |
+
SCRIPT_DESCRIPTIONS = {
|
50 |
+
"Qwen Mini (Chinese)": "Uses the Chinese version of Alibaba Cloud's Qwen model, suitable for Chinese Q&A and tasks",
|
51 |
+
"Qwen (Chinese)": "Uses Alibaba Cloud's Qwen model, supports various tools and functions",
|
52 |
+
"Mini": "Lightweight version, uses OpenAI GPT-4o model",
|
53 |
+
"DeepSeek (Chinese)": "Uses DeepSeek model, suitable for non-multimodal tasks",
|
54 |
+
"Default": "Default OWL implementation, uses OpenAI GPT-4o model and full set of tools",
|
55 |
+
"GAIA Roleplaying": "GAIA benchmark implementation, used to evaluate model capabilities",
|
56 |
+
"OpenAI Compatible": "Uses third-party models compatible with OpenAI API, supports custom API endpoints",
|
57 |
+
"Azure OpenAI": "Uses Azure OpenAI API",
|
58 |
+
"Ollama": "Uses Ollama API",
|
59 |
+
"Terminal": "Uses local terminal to execute python files",
|
60 |
+
}
|
61 |
+
|
62 |
+
# Environment variable groups
|
63 |
+
ENV_GROUPS = {
|
64 |
+
"Model API": [
|
65 |
+
{
|
66 |
+
"name": "OPENAI_API_KEY",
|
67 |
+
"label": "OpenAI API Key",
|
68 |
+
"type": "password",
|
69 |
+
"required": False,
|
70 |
+
"help": "OpenAI API key for accessing GPT models. Get it from: https://platform.openai.com/api-keys",
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"name": "OPENAI_API_BASE_URL",
|
74 |
+
"label": "OpenAI API Base URL",
|
75 |
+
"type": "text",
|
76 |
+
"required": False,
|
77 |
+
"help": "Base URL for OpenAI API, optional. Set this if using a proxy or custom endpoint.",
|
78 |
+
},
|
79 |
+
{
|
80 |
+
"name": "AZURE_OPENAI_KEY",
|
81 |
+
"label": "Azure OpenAI API Key",
|
82 |
+
"type": "password",
|
83 |
+
"required": False,
|
84 |
+
"help": "Azure OpenAI API key for accessing Azure deployed GPT models. Get it from: https://portal.azure.com/",
|
85 |
+
},
|
86 |
+
{
|
87 |
+
"name": "AZURE_OPENAI_ENDPOINT",
|
88 |
+
"label": "Azure OpenAI Endpoint",
|
89 |
+
"type": "text",
|
90 |
+
"required": False,
|
91 |
+
"help": "Azure OpenAI service endpoint URL",
|
92 |
+
},
|
93 |
+
{
|
94 |
+
"name": "AZURE_DEPLOYMENT_NAME",
|
95 |
+
"label": "Azure OpenAI Deployment Name",
|
96 |
+
"type": "text",
|
97 |
+
"required": False,
|
98 |
+
"help": "Azure OpenAI service deployment name",
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"name": "AZURE_OPENAI_VERSION",
|
102 |
+
"label": "Azure OpenAI API Version",
|
103 |
+
"type": "text",
|
104 |
+
"required": False,
|
105 |
+
"help": "Azure OpenAI API version, e.g. 2023-12-01-preview",
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"name": "QWEN_API_KEY",
|
109 |
+
"label": "Alibaba Cloud Qwen API Key",
|
110 |
+
"type": "password",
|
111 |
+
"required": False,
|
112 |
+
"help": "Alibaba Cloud Qwen API key for accessing Qwen models. Get it from: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
|
113 |
+
},
|
114 |
+
{
|
115 |
+
"name": "DEEPSEEK_API_KEY",
|
116 |
+
"label": "DeepSeek API Key",
|
117 |
+
"type": "password",
|
118 |
+
"required": False,
|
119 |
+
"help": "DeepSeek API key for accessing DeepSeek models. Get it from: https://platform.deepseek.com/api_keys",
|
120 |
+
},
|
121 |
+
],
|
122 |
+
"Search Tools": [
|
123 |
+
{
|
124 |
+
"name": "GOOGLE_API_KEY",
|
125 |
+
"label": "Google API Key",
|
126 |
+
"type": "password",
|
127 |
+
"required": False,
|
128 |
+
"help": "Google Search API key for web search functionality. Get it from: https://developers.google.com/custom-search/v1/overview",
|
129 |
+
},
|
130 |
+
{
|
131 |
+
"name": "SEARCH_ENGINE_ID",
|
132 |
+
"label": "Search Engine ID",
|
133 |
+
"type": "text",
|
134 |
+
"required": False,
|
135 |
+
"help": "Google Custom Search Engine ID, used with Google API key. Get it from: https://developers.google.com/custom-search/v1/overview",
|
136 |
+
},
|
137 |
+
],
|
138 |
+
"Other Tools": [
|
139 |
+
{
|
140 |
+
"name": "HF_TOKEN",
|
141 |
+
"label": "Hugging Face Token",
|
142 |
+
"type": "password",
|
143 |
+
"required": False,
|
144 |
+
"help": "Hugging Face API token for accessing Hugging Face models and datasets. Get it from: https://huggingface.co/join",
|
145 |
+
},
|
146 |
+
{
|
147 |
+
"name": "CHUNKR_API_KEY",
|
148 |
+
"label": "Chunkr API Key",
|
149 |
+
"type": "password",
|
150 |
+
"required": False,
|
151 |
+
"help": "Chunkr API key for document processing functionality. Get it from: https://chunkr.ai/",
|
152 |
+
},
|
153 |
+
{
|
154 |
+
"name": "FIRECRAWL_API_KEY",
|
155 |
+
"label": "Firecrawl API Key",
|
156 |
+
"type": "password",
|
157 |
+
"required": False,
|
158 |
+
"help": "Firecrawl API key for web crawling functionality. Get it from: https://www.firecrawl.dev/",
|
159 |
+
},
|
160 |
+
],
|
161 |
+
"Custom Environment Variables": [], # User-defined environment variables will be stored here
|
162 |
+
}
|
163 |
+
|
164 |
+
|
165 |
+
def get_script_info(script_name):
|
166 |
+
"""Get detailed information about the script"""
|
167 |
+
return SCRIPT_DESCRIPTIONS.get(script_name, "No description available")
|
168 |
+
|
169 |
+
|
170 |
+
def load_env_vars():
|
171 |
+
"""Load environment variables"""
|
172 |
+
env_vars = {}
|
173 |
+
# Try to load from .env file
|
174 |
+
dotenv.load_dotenv()
|
175 |
+
|
176 |
+
# Get all environment variables
|
177 |
+
for group in ENV_GROUPS.values():
|
        for var in group:
            env_vars[var["name"]] = os.environ.get(var["name"], "")

    # Load other environment variables that may exist in the .env file
    if Path(".env").exists():
        try:
            with open(".env", "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith("#") and "=" in line:
                        try:
                            key, value = line.split("=", 1)
                            key = key.strip()
                            value = value.strip()

                            # Handle quoted values
                            if (value.startswith('"') and value.endswith('"')) or (
                                value.startswith("'") and value.endswith("'")
                            ):
                                value = value[1:-1]  # Remove quotes at the beginning and end

                            # Check if it's a known environment variable
                            known_var = False
                            for group in ENV_GROUPS.values():
                                if any(var["name"] == key for var in group):
                                    known_var = True
                                    break

                            # If it's not a known environment variable, add it to the custom group
                            if not known_var and key not in env_vars:
                                ENV_GROUPS["Custom Environment Variables"].append(
                                    {
                                        "name": key,
                                        "label": key,
                                        "type": "text",
                                        "required": False,
                                        "help": "User-defined environment variable",
                                    }
                                )
                                env_vars[key] = value
                        except Exception as e:
                            print(
                                f"Error parsing environment variable line: {line}, error: {str(e)}"
                            )
        except Exception as e:
            print(f"Error loading .env file: {str(e)}")

    return env_vars


def save_env_vars(env_vars):
    """Save environment variables to .env file"""
    # Read existing .env file content
    env_path = Path(".env")
    existing_content = {}

    if env_path.exists():
        try:
            with open(env_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith("#") and "=" in line:
                        try:
                            key, value = line.split("=", 1)
                            existing_content[key.strip()] = value.strip()
                        except Exception as e:
                            print(
                                f"Error parsing environment variable line: {line}, error: {str(e)}"
                            )
        except Exception as e:
            print(f"Error reading .env file: {str(e)}")

    # Update environment variables
    for key, value in env_vars.items():
        if value is not None:  # Allow empty string values, but not None
            value = str(value)  # Ensure the value is a string

            # Check if the value is already wrapped in quotes
            if (value.startswith('"') and value.endswith('"')) or (
                value.startswith("'") and value.endswith("'")
            ):
                # Already wrapped in quotes, keep as is
                existing_content[key] = value
                # Update environment variable by removing quotes
                os.environ[key] = value[1:-1]
            else:
                # Not wrapped in quotes: wrap the value in double quotes so
                # special characters are handled correctly
                quoted_value = f'"{value}"'
                existing_content[key] = quoted_value
                # Also update the environment variable for the current process (using the unquoted value)
                os.environ[key] = value

    # Write to .env file
    try:
        with open(env_path, "w", encoding="utf-8") as f:
            for key, value in existing_content.items():
                f.write(f"{key}={value}\n")
    except Exception as e:
        print(f"Error writing to .env file: {str(e)}")
        return f"❌ Failed to save environment variables: {str(e)}"

    return "✅ Environment variables saved"


def add_custom_env_var(name, value, var_type):
    """Add custom environment variable"""
    if not name:
        return "❌ Environment variable name cannot be empty", None

    # Check if an environment variable with the same name already exists
    for group in ENV_GROUPS.values():
        if any(var["name"] == name for var in group):
            return f"❌ Environment variable {name} already exists", None

    # Add to custom environment variables group
    ENV_GROUPS["Custom Environment Variables"].append(
        {
            "name": name,
            "label": name,
            "type": var_type,
            "required": False,
            "help": "User-defined environment variable",
        }
    )

    # Save environment variables
    env_vars = {name: value}
    save_env_vars(env_vars)

    # Return success message and updated environment variable group
    return f"✅ Added environment variable {name}", ENV_GROUPS[
        "Custom Environment Variables"
    ]


def update_custom_env_var(name, value, var_type):
    """Update custom environment variable"""
    if not name:
        return "❌ Environment variable name cannot be empty", None

    # Check if the environment variable exists in the custom environment variables group
    found = False
    for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
        if var["name"] == name:
            # Update type
            ENV_GROUPS["Custom Environment Variables"][i]["type"] = var_type
            found = True
            break

    if not found:
        return f"❌ Custom environment variable {name} does not exist", None

    # Save environment variable value
    env_vars = {name: value}
    save_env_vars(env_vars)

    # Return success message and updated environment variable group
    return f"✅ Updated environment variable {name}", ENV_GROUPS[
        "Custom Environment Variables"
    ]


def delete_custom_env_var(name):
    """Delete custom environment variable"""
    if not name:
        return "❌ Environment variable name cannot be empty", None

    # Check if the environment variable exists in the custom environment variables group
    found = False
    for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
        if var["name"] == name:
            # Delete from custom environment variables group
            del ENV_GROUPS["Custom Environment Variables"][i]
            found = True
            break

    if not found:
        return f"❌ Custom environment variable {name} does not exist", None

    # Delete the environment variable from .env file
    env_path = Path(".env")
    if env_path.exists():
        try:
            with open(env_path, "r", encoding="utf-8") as f:
                lines = f.readlines()

            with open(env_path, "w", encoding="utf-8") as f:
                for line in lines:
                    try:
                        # Match environment variable lines precisely
                        line_stripped = line.strip()
                        # Keep comment lines and empty lines
                        if not line_stripped or line_stripped.startswith("#"):
                            f.write(line)
                            continue

                        # Keep lines without an equals sign
                        if "=" not in line_stripped:
                            f.write(line)
                            continue

                        # Extract the variable name; keep lines that don't match
                        # the variable to be deleted
                        var_name = line_stripped.split("=", 1)[0].strip()
                        if var_name != name:
                            f.write(line)
                    except Exception as e:
                        print(
                            f"Error processing .env file line: {line}, error: {str(e)}"
                        )
                        # Keep the original line when an error occurs
                        f.write(line)
        except Exception as e:
            print(f"Error deleting environment variable: {str(e)}")
            return f"❌ Failed to delete environment variable: {str(e)}", None

    # Delete from current process environment variables
    if name in os.environ:
        del os.environ[name]

    # Return success message and updated environment variable group
    return f"✅ Deleted environment variable {name}", ENV_GROUPS[
        "Custom Environment Variables"
    ]


def terminate_process():
    """Terminate the currently running process"""
    global current_process

    with process_lock:
        if current_process is not None and current_process.poll() is None:
            try:
                if os.name == "nt":
                    # On Windows, use taskkill to forcibly terminate the process tree
                    pid = current_process.pid
                    # Terminate the process and its children; a list argument
                    # avoids shell=True for better security
                    try:
                        subprocess.run(
                            ["taskkill", "/F", "/T", "/PID", str(pid)], check=False
                        )
                    except subprocess.SubprocessError as e:
                        log_queue.put(f"Error terminating process: {str(e)}\n")
                        return f"❌ Error terminating process: {str(e)}"
                else:
                    # On Unix, use SIGTERM and SIGKILL
                    current_process.terminate()
                    try:
                        current_process.wait(timeout=3)
                    except subprocess.TimeoutExpired:
                        current_process.kill()

                # Wait for process to terminate
                try:
                    current_process.wait(timeout=2)
                except subprocess.TimeoutExpired:
                    pass  # Already tried to force terminate, ignore timeout

                log_queue.put("Process terminated\n")
                return "✅ Process terminated"
            except Exception as e:
                log_queue.put(f"Error terminating process: {str(e)}\n")
                return f"❌ Error terminating process: {str(e)}"
        else:
            return "❌ No process is currently running"


def run_script(script_dropdown, question, progress=gr.Progress()):
    """Run the selected script and return the output"""
    global current_process

    script_name = SCRIPTS.get(script_dropdown)
    if not script_name:
        return "❌ Invalid script selection", "", "", "", None

    if not question.strip():
        return "Please enter a question!", "", "", "", None

    # Clear the log queue
    while not log_queue.empty():
        log_queue.get()

    # Create log directory
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)

    # Create log file with timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"

    # Build command
    base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    cmd = [
        sys.executable,
        os.path.join(base_path, "owl", "script_adapter.py"),
        os.path.join(base_path, "owl", script_name),
    ]

    # Create a copy of environment variables and add the question
    env = os.environ.copy()
    # Ensure question is a string type
    if not isinstance(question, str):
        question = str(question)
    # Preserve newlines, but ensure it's a valid string
    env["OWL_QUESTION"] = question

    # Start the process
    with process_lock:
        current_process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            bufsize=1,
            env=env,
            encoding="utf-8",
        )

    # Create thread to read output
    def read_output():
        try:
            # Use a unique timestamp to ensure the log filename is not duplicated
            timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
            unique_log_file = (
                log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
            )

            # Use this unique filename to write logs
            with open(unique_log_file, "w", encoding="utf-8") as f:
                # Update global log file path
                nonlocal log_file
                log_file = unique_log_file

                for line in iter(current_process.stdout.readline, ""):
                    if line:
                        # Write to log file
                        f.write(line)
                        f.flush()
                        # Add to queue
                        log_queue.put(line)
        except Exception as e:
            log_queue.put(f"Error reading output: {str(e)}\n")

    # Start the reading thread
    threading.Thread(target=read_output, daemon=True).start()

    # Collect logs
    logs = []
    progress(0, desc="Running...")

    # Wait for process to complete or timeout
    start_time = time.time()
    timeout = 1800  # 30 minutes timeout

    while current_process.poll() is None:
        # Check if timed out
        if time.time() - start_time > timeout:
            with process_lock:
                if current_process.poll() is None:
                    if os.name == "nt":
                        current_process.send_signal(signal.CTRL_BREAK_EVENT)
                    else:
                        current_process.terminate()
                    log_queue.put("Execution timeout, process terminated\n")
            break

        # Get logs from queue
        while not log_queue.empty():
            log = log_queue.get()
            logs.append(log)

        # Update progress
        elapsed = time.time() - start_time
        progress(min(elapsed / 300, 0.99), desc="Running...")

        # Short sleep to reduce CPU usage
        time.sleep(0.1)

        # Update log display once per second
        yield (
            status_message(current_process),
            extract_answer(logs),
            "".join(logs),
            str(log_file),
            None,
        )

    # Get remaining logs
    while not log_queue.empty():
        logs.append(log_queue.get())

    # Extract chat history (if any)
    chat_history = extract_chat_history(logs)

    # Return final status and logs
    return (
        status_message(current_process),
        extract_answer(logs),
        "".join(logs),
        str(log_file),
        chat_history,
    )


def status_message(process):
    """Return status message based on process status"""
    if process.poll() is None:
        return "⏳ Running..."
    elif process.returncode == 0:
        return "✅ Execution successful"
    else:
        return f"❌ Execution failed (return code: {process.returncode})"


def extract_answer(logs):
    """Extract answer from logs"""
    answer = ""
    for log in logs:
        if "Answer:" in log:
            answer = log.split("Answer:", 1)[1].strip()
            break
    return answer


def extract_chat_history(logs):
    """Try to extract chat history from logs"""
    try:
        chat_json_str = ""
        capture_json = False

        for log in logs:
            if "chat_history" in log:
                # Start capturing JSON
                start_idx = log.find("[")
                if start_idx != -1:
                    capture_json = True
                    chat_json_str = log[start_idx:]
            elif capture_json:
                # Continue capturing JSON until finding the matching closing bracket
                chat_json_str += log
                if "]" in log:
                    # Found closing bracket, try to parse JSON
                    end_idx = chat_json_str.rfind("]") + 1
                    if end_idx > 0:
                        try:
                            # Clean up possible extra text
                            json_str = chat_json_str[:end_idx].strip()
                            chat_data = json.loads(json_str)

                            # Format for use with Gradio chat component
                            formatted_chat = []
                            for msg in chat_data:
                                if "role" in msg and "content" in msg:
                                    role = (
                                        "User" if msg["role"] == "user" else "Assistant"
                                    )
                                    formatted_chat.append([role, msg["content"]])
                            return formatted_chat
                        except json.JSONDecodeError:
                            # If parsing fails, continue capturing
                            pass
                        except Exception:
                            # Other errors, stop capturing
                            capture_json = False
    except Exception:
        pass
    return None


def create_ui():
    """Create Gradio interface"""
    # Load environment variables
    env_vars = load_env_vars()

    with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
        gr.Markdown(
            """
            # 🦉 OWL Intelligent Assistant Platform

            Select a model and enter your question, and the system will run the corresponding script and display the results.
            """
        )

        with gr.Tabs():
            with gr.TabItem("Run Mode"):
                with gr.Row():
                    with gr.Column(scale=1):
                        # Ensure the default value is a key that exists in SCRIPTS
                        default_script = list(SCRIPTS.keys())[0] if SCRIPTS else None
                        script_dropdown = gr.Dropdown(
                            choices=list(SCRIPTS.keys()),
                            value=default_script,
                            label="Select Mode",
                        )

                        script_info = gr.Textbox(
                            value=get_script_info(default_script)
                            if default_script
                            else "",
                            label="Model Description",
                            interactive=False,
                        )

                        script_dropdown.change(
                            fn=lambda x: get_script_info(x),
                            inputs=script_dropdown,
                            outputs=script_info,
                        )

                        question_input = gr.Textbox(
                            lines=8,
                            placeholder="Please enter your question...",
                            label="Question",
                            elem_id="question_input",
                            show_copy_button=True,
                        )

                        gr.Markdown(
                            """
                            > **Note**: Your question will replace the default question in the script. The system handles the replacement automatically, ensuring your question is used correctly.
                            > Multi-line input is supported; line breaks are preserved.
                            """
                        )

                        with gr.Row():
                            run_button = gr.Button("Run", variant="primary")
                            stop_button = gr.Button("Stop", variant="stop")

                    with gr.Column(scale=2):
                        with gr.Tabs():
                            with gr.TabItem("Results"):
                                status_output = gr.Textbox(label="Status")
                                answer_output = gr.Textbox(label="Answer", lines=10)
                                log_file_output = gr.Textbox(label="Log File Path")

                            with gr.TabItem("Run Logs"):
                                log_output = gr.Textbox(label="Complete Logs", lines=25)

                            with gr.TabItem("Chat History"):
                                chat_output = gr.Chatbot(label="Conversation History")

                # Example questions
                examples = [
                    [
                        "Qwen Mini (Chinese)",
                        "Browse Amazon and find a product that is attractive to programmers. Please provide the product name and price.",
                    ],
                    [
                        "DeepSeek (Chinese)",
                        "Please analyze the latest statistics of the CAMEL-AI project on GitHub. Find out the number of stars, number of contributors, and recent activity of the project. Then, create a simple Excel spreadsheet to display this data and generate a bar chart to visualize these metrics. Finally, summarize the popularity and development trends of the CAMEL project.",
                    ],
                    [
                        "Default",
                        "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer.",
                    ],
                ]

                gr.Examples(examples=examples, inputs=[script_dropdown, question_input])

            with gr.TabItem("Environment Variable Configuration"):
                env_inputs = {}
                save_status = gr.Textbox(label="Save Status", interactive=False)

                # Add custom environment variables section
                with gr.Accordion("Add Custom Environment Variables", open=True):
                    with gr.Row():
                        new_var_name = gr.Textbox(
                            label="Environment Variable Name",
                            placeholder="Example: MY_CUSTOM_API_KEY",
                        )
                        new_var_value = gr.Textbox(
                            label="Environment Variable Value",
                            placeholder="Enter value",
                        )
                        new_var_type = gr.Dropdown(
                            choices=["text", "password"], value="text", label="Type"
                        )

                    add_var_button = gr.Button(
                        "Add Environment Variable", variant="primary"
                    )
                    add_var_status = gr.Textbox(label="Add Status", interactive=False)

                    # Custom environment variables list
                    custom_vars_list = gr.JSON(
                        value=ENV_GROUPS["Custom Environment Variables"],
                        label="Added Custom Environment Variables",
                        visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
                    )

                # Update and delete custom environment variables section
                with gr.Accordion(
                    "Update or Delete Custom Environment Variables",
                    open=True,
                    visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
                ) as update_delete_accordion:
                    with gr.Row():
                        # Create dropdown menu to display all custom environment variables
                        custom_var_dropdown = gr.Dropdown(
                            choices=[
                                var["name"]
                                for var in ENV_GROUPS["Custom Environment Variables"]
                            ],
                            label="Select Environment Variable",
                            interactive=True,
                        )
                        update_var_value = gr.Textbox(
                            label="New Environment Variable Value",
                            placeholder="Enter new value",
                        )
                        update_var_type = gr.Dropdown(
                            choices=["text", "password"], value="text", label="Type"
                        )

                    with gr.Row():
                        update_var_button = gr.Button(
                            "Update Environment Variable", variant="primary"
                        )
                        delete_var_button = gr.Button(
                            "Delete Environment Variable", variant="stop"
                        )

                    update_var_status = gr.Textbox(
                        label="Operation Status", interactive=False
                    )

                # Add environment variable button click event
                add_var_button.click(
                    fn=add_custom_env_var,
                    inputs=[new_var_name, new_var_value, new_var_type],
                    outputs=[add_var_status, custom_vars_list],
                ).then(
                    fn=lambda vars: {"visible": len(vars) > 0},
                    inputs=[custom_vars_list],
                    outputs=[update_delete_accordion],
                )

                # Update environment variable button click event
                update_var_button.click(
                    fn=update_custom_env_var,
                    inputs=[custom_var_dropdown, update_var_value, update_var_type],
                    outputs=[update_var_status, custom_vars_list],
                )

                # Delete environment variable button click event
                delete_var_button.click(
                    fn=delete_custom_env_var,
                    inputs=[custom_var_dropdown],
                    outputs=[update_var_status, custom_vars_list],
                ).then(
                    fn=lambda vars: {"visible": len(vars) > 0},
                    inputs=[custom_vars_list],
                    outputs=[update_delete_accordion],
                )

                # When the custom environment variables list is updated,
                # refresh the dropdown menu options
                custom_vars_list.change(
                    fn=lambda vars: {
                        "choices": [var["name"] for var in vars],
                        "value": None,
                    },
                    inputs=[custom_vars_list],
                    outputs=[custom_var_dropdown],
                )

                # Existing environment variable configuration
                for group_name, vars in ENV_GROUPS.items():
                    if (
                        group_name != "Custom Environment Variables" or len(vars) > 0
                    ):  # Only show non-empty custom environment variable groups
                        with gr.Accordion(
                            group_name,
                            open=(group_name != "Custom Environment Variables"),
                        ):
                            for var in vars:
                                # Add help information
                                gr.Markdown(f"**{var['help']}**")

                                if var["type"] == "password":
                                    env_inputs[var["name"]] = gr.Textbox(
                                        value=env_vars.get(var["name"], ""),
                                        label=var["label"],
                                        placeholder=f"Please enter {var['label']}",
                                        type="password",
                                    )
                                else:
                                    env_inputs[var["name"]] = gr.Textbox(
                                        value=env_vars.get(var["name"], ""),
                                        label=var["label"],
                                        placeholder=f"Please enter {var['label']}",
                                    )

                save_button = gr.Button("Save Environment Variables", variant="primary")

                # Save environment variables
                save_inputs = [
                    env_inputs[var_name]
                    for group in ENV_GROUPS.values()
                    for var in group
                    for var_name in [var["name"]]
                    if var_name in env_inputs
                ]
                save_button.click(
                    fn=lambda *values: save_env_vars(
                        dict(
                            zip(
                                [
                                    var["name"]
                                    for group in ENV_GROUPS.values()
                                    for var in group
                                    if var["name"] in env_inputs
                                ],
                                values,
                            )
                        )
                    ),
                    inputs=save_inputs,
                    outputs=save_status,
                )

        # Run script
        run_button.click(
            fn=run_script,
            inputs=[script_dropdown, question_input],
            outputs=[
                status_output,
                answer_output,
                log_output,
                log_file_output,
                chat_output,
            ],
            show_progress=True,
        )

        # Terminate execution
        stop_button.click(fn=terminate_process, inputs=[], outputs=[status_output])

        # Add footer
        gr.Markdown(
            """
            ### 📝 Instructions

            - Select a model and enter your question
            - Click the "Run" button to start execution
            - To stop execution, click the "Stop" button
            - View execution status and answers in the "Results" tab
            - View complete logs in the "Run Logs" tab
            - View conversation history in the "Chat History" tab (if available)
            - Configure API keys and other environment variables in the "Environment Variable Configuration" tab
            - You can add custom environment variables to meet special requirements

            ### ⚠️ Notes

            - Running some models may require API keys; make sure you have set the corresponding environment variables in the "Environment Variable Configuration" tab
            - Some scripts may take a long time to run; please be patient
            - If execution exceeds 30 minutes, the process will terminate automatically
            - Your question will replace the default question in the script; make sure the question is compatible with the selected model
            """
        )

    return app


if __name__ == "__main__":
    # Create and launch the application
    app = create_ui()
    app.queue().launch(share=True)
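For reference, the core of run_script above is a child process whose stdout is copied into a queue by a daemon thread while the caller drains it. Below is a minimal, self-contained sketch of that pattern; the function name and the command list are placeholders for illustration, not code from this repository:

import queue
import subprocess
import threading

def pump_logs(cmd):
    # Spawn the child with merged stdout/stderr, as run_script does.
    log_q = queue.Queue()
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        bufsize=1,
        encoding="utf-8",
    )

    def reader():
        # Copy child output into the queue until EOF closes the pipe.
        for line in iter(proc.stdout.readline, ""):
            log_q.put(line)

    t = threading.Thread(target=reader, daemon=True)
    t.start()

    proc.wait()  # block until the child exits
    t.join()     # the reader ends once the pipe hits EOF

    lines = []
    while not log_q.empty():
        lines.append(log_q.get())
    return proc.returncode, "".join(lines)

# Hypothetical usage: code, output = pump_logs([sys.executable, "some_script.py"])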
owl/camel/__init__.py
DELETED
@@ -1,25 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

from camel.logger import disable_logging, enable_logging, set_log_level

__version__ = '0.2.11'

__all__ = [
    '__version__',
    'camel',
    'disable_logging',
    'enable_logging',
    'set_log_level',
]
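The deleted init exposed the package version and three logging helpers re-exported from camel.logger; a caller would have used it roughly as below (an illustrative sketch, not code from this repository):

import camel

print(camel.__version__)      # '0.2.11' in the deleted file
camel.set_log_level("DEBUG")  # forwarded from camel.logger
camel.disable_logging()
camel.enable_logging()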
owl/camel/__pycache__/__init__.cpython-311.pyc
DELETED (binary file, 393 Bytes)
owl/camel/__pycache__/generators.cpython-311.pyc
DELETED (binary file, 18 kB)
owl/camel/__pycache__/human.cpython-311.pyc
DELETED (binary file, 6.13 kB)
owl/camel/__pycache__/logger.cpython-311.pyc
DELETED (binary file, 5.4 kB)
owl/camel/agents/__init__.py
DELETED
@@ -1,44 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from .base import BaseAgent
from .chat_agent import ChatAgent
from .critic_agent import CriticAgent
from .embodied_agent import EmbodiedAgent
from .knowledge_graph_agent import KnowledgeGraphAgent
from .role_assignment_agent import RoleAssignmentAgent
from .search_agent import SearchAgent
from .task_agent import (
    TaskCreationAgent,
    TaskPlannerAgent,
    TaskPrioritizationAgent,
    TaskSpecifyAgent,
)
from .tool_agents.base import BaseToolAgent
from .tool_agents.hugging_face_tool_agent import HuggingFaceToolAgent

__all__ = [
    'BaseAgent',
    'ChatAgent',
    'TaskSpecifyAgent',
    'TaskPlannerAgent',
    'TaskCreationAgent',
    'TaskPrioritizationAgent',
    'CriticAgent',
    'BaseToolAgent',
    'HuggingFaceToolAgent',
    'EmbodiedAgent',
    'RoleAssignmentAgent',
    'SearchAgent',
    'KnowledgeGraphAgent',
]
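This init flattened the per-module agent classes into one namespace, so downstream code could import them in a single line; an illustrative example of the import style it enabled:

# Illustrative only: the flat import path provided by the deleted __init__.py.
from camel.agents import ChatAgent, CriticAgent, TaskSpecifyAgent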
owl/camel/agents/__pycache__/__init__.cpython-311.pyc
DELETED (binary file, 1.13 kB)
owl/camel/agents/__pycache__/base.cpython-311.pyc
DELETED (binary file, 1.12 kB)
owl/camel/agents/__pycache__/chat_agent.cpython-311.pyc
DELETED (binary file, 52.1 kB)
owl/camel/agents/__pycache__/critic_agent.cpython-311.pyc
DELETED (binary file, 8.66 kB)
owl/camel/agents/__pycache__/embodied_agent.cpython-311.pyc
DELETED (binary file, 8.93 kB)
owl/camel/agents/__pycache__/knowledge_graph_agent.cpython-311.pyc
DELETED (binary file, 10.1 kB)
owl/camel/agents/__pycache__/role_assignment_agent.cpython-311.pyc
DELETED (binary file, 6.47 kB)
owl/camel/agents/__pycache__/search_agent.cpython-311.pyc
DELETED (binary file, 5.37 kB)
owl/camel/agents/__pycache__/task_agent.cpython-311.pyc
DELETED (binary file, 16.9 kB)
owl/camel/agents/base.py
DELETED
@@ -1,29 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from abc import ABC, abstractmethod
from typing import Any


class BaseAgent(ABC):
    r"""An abstract base class for all CAMEL agents."""

    @abstractmethod
    def reset(self, *args: Any, **kwargs: Any) -> Any:
        r"""Resets the agent to its initial state."""
        pass

    @abstractmethod
    def step(self, *args: Any, **kwargs: Any) -> Any:
        r"""Performs a single step of the agent."""
        pass
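BaseAgent fixes only the reset/step contract that every CAMEL agent, including the ChatAgent deleted below, implements. A minimal concrete subclass might look like the following; this is an illustrative sketch, not repository code:

from typing import Any

class EchoAgent(BaseAgent):
    """Toy agent satisfying the abstract reset/step contract."""

    def reset(self, *args: Any, **kwargs: Any) -> None:
        # Return to the initial state: forget everything seen so far.
        self.history: list = []

    def step(self, *args: Any, **kwargs: Any) -> Any:
        # One unit of work: record the input and echo it back.
        self.history.append(args)
        return args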
owl/camel/agents/chat_agent.py
DELETED
@@ -1,1423 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from __future__ import annotations

import json
# import logging
import re
import uuid
from collections import defaultdict
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

from loguru import logger

from openai.types.chat import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_message_tool_call import Function
from pydantic import BaseModel

from camel.agents.base import BaseAgent
from camel.memories import (
    AgentMemory,
    ChatHistoryMemory,
    MemoryRecord,
    ScoreBasedContextCreator,
)
from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
from camel.models import (
    BaseModelBackend,
    ModelFactory,
    ModelManager,
    ModelProcessingError,
)
from camel.responses import ChatAgentResponse
from camel.types import (
    ChatCompletion,
    ChatCompletionChunk,
    ModelPlatformType,
    ModelType,
    OpenAIBackendRole,
    RoleType,
)
from camel.utils import (
    func_string_to_callable,
    get_model_encoding,
    get_pydantic_object_schema,
    json_to_function_code,
)

if TYPE_CHECKING:
    from openai import Stream

    from camel.terminators import ResponseTerminator
    from camel.toolkits import FunctionTool


# logger = logging.getLogger(__name__)

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


class FunctionCallingRecord(BaseModel):
    r"""Historical records of functions called in the conversation.

    Attributes:
        func_name (str): The name of the function being called.
        args (Dict[str, Any]): The dictionary of arguments passed to
            the function.
        result (Any): The execution result of calling this function.
    """

    func_name: str
    args: Dict[str, Any]
    result: Any

    def __str__(self) -> str:
        r"""Overridden version of the string function.

        Returns:
            str: Modified string to represent the function calling.
        """
        return (
            f"Function Execution: {self.func_name}\n"
            f"\tArgs: {self.args}\n"
            f"\tResult: {self.result}"
        )

    def as_dict(self) -> dict[str, Any]:
        r"""Returns the function calling record as a dictionary.

        Returns:
            dict[str, Any]: The function calling record as a dictionary.
        """
        return self.model_dump()


@track_agent(name="ChatAgent")
class ChatAgent(BaseAgent):
    r"""Class for managing conversations of CAMEL Chat Agents.

    Args:
        system_message (Union[BaseMessage, str], optional): The system message
            for the chat agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`ModelPlatformType.DEFAULT`
            with `ModelType.DEFAULT`)
        memory (AgentMemory, optional): The agent memory for managing chat
            messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
            (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        token_limit (int, optional): The maximum number of tokens in a context.
            The context will be automatically pruned to fulfill the limitation.
            If `None`, it will be set according to the backend model.
            (default: :obj:`None`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        tools (List[FunctionTool], optional): List of available
            :obj:`FunctionTool`. (default: :obj:`None`)
        external_tools (List[FunctionTool], optional): List of external tools
            (:obj:`FunctionTool`) bind to one chat agent. When these tools
            are called, the agent will directly return the request instead of
            processing it. (default: :obj:`None`)
        response_terminators (List[ResponseTerminator], optional): List of
            :obj:`ResponseTerminator` bind to one chat agent.
            (default: :obj:`None`)
        scheduling_strategy (str): name of function that defines how to select
            the next model in ModelManager. (default: :str:`round_robin`)
    """

    def __init__(
        self,
        system_message: Optional[Union[BaseMessage, str]] = None,
        model: Optional[
            Union[BaseModelBackend, List[BaseModelBackend]]
        ] = None,
        memory: Optional[AgentMemory] = None,
        message_window_size: Optional[int] = None,
        token_limit: Optional[int] = None,
        output_language: Optional[str] = None,
        tools: Optional[List[FunctionTool]] = None,
        external_tools: Optional[List[FunctionTool]] = None,
        response_terminators: Optional[List[ResponseTerminator]] = None,
        scheduling_strategy: str = "round_robin",
    ) -> None:
        from copy import deepcopy
        if isinstance(system_message, str):
            system_message = BaseMessage.make_assistant_message(
                role_name='Assistant', content=system_message
            )

        self.orig_sys_message: Optional[BaseMessage] = system_message
        self._system_message: Optional[BaseMessage] = system_message
        self.role_name: str = (
            getattr(system_message, 'role_name', None) or "assistant"
        )
        self.role_type: RoleType = (
            getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
        )
        self.model_backend = ModelManager(
            model
            if model is not None
            else ModelFactory.create(
                model_platform=ModelPlatformType.DEFAULT,
                model_type=ModelType.DEFAULT,
            ),
            scheduling_strategy=scheduling_strategy,
        )

        self.model_type = self.model_backend.model_type

        # Tool registration
        external_tools = external_tools or []
        tools = tools or []
        all_tools = tools + external_tools
        self.external_tool_names = [
            tool.get_function_name() for tool in external_tools
        ]
        self.func_dict = {
            tool.get_function_name(): tool.func for tool in all_tools
        }
        self.tool_dict = {tool.get_function_name(): tool for tool in all_tools}
        self._all_tools = all_tools

        # If the user set tools from `ChatAgent`, it will override the
        # configured tools in `BaseModelBackend`.
        if all_tools:
            # logger.warning(
            #     "Overriding the configured tools in `BaseModelBackend` with the tools from `ChatAgent`."
            # )
            tool_schema_list = [
                tool.get_openai_tool_schema() for tool in all_tools
            ]
            self.model_backend.model_config_dict['tools'] = tool_schema_list
            self.tool_schema_list = tool_schema_list

        from copy import deepcopy
        self.model_config_dict = deepcopy(self.model_backend.model_config_dict)

        self.model_token_limit = token_limit or self.model_backend.token_limit
        context_creator = ScoreBasedContextCreator(
            self.model_backend.token_counter,
            self.model_token_limit,
        )
        self.memory: AgentMemory = memory or ChatHistoryMemory(
            context_creator, window_size=message_window_size
        )

        self.output_language: Optional[str] = output_language
        if self.output_language is not None:
            self.set_output_language(self.output_language)

        self.terminated: bool = False
        self.response_terminators = response_terminators or []
        self.init_messages()

        self.tool_prompt_added = False

    # ruff: noqa: E501
    def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
        r"""Generates a tool prompt based on the provided tool schema list.

        Args:
            tool_schema_list (List[Dict]): A list of dictionaries, each
                containing a tool schema.

        Returns:
            str: A string representing the tool prompt.
        """
        tool_prompts = []

        for tool in tool_schema_list:
            tool_info = tool['function']
            tool_name = tool_info['name']
            tool_description = tool_info['description']
            tool_json = json.dumps(tool_info, indent=4)

            prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
            tool_prompts.append(prompt)

        tool_prompt_str = "\n".join(tool_prompts)

        final_prompt = f'''
# Tool prompt
TOOL_PROMPT = f"""
You have access to the following functions:

{tool_prompt_str}

If you choose to call a function ONLY reply in the following format with no
prefix or suffix:

<function=example_function_name>{{"example_name": "example_value"}}
</function>

Reminder:
- Function calls MUST follow the specified format, start with <function=
and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line
- If there is no function call available, answer the question like normal
with your current knowledge and do not tell the user about function calls
"""
'''
        return final_prompt

    def _parse_tool_response(self, response: str):
        r"""Parses the tool response to extract the function name and
        arguments.

        Args:
            response (str): The response from the model containing the
                function call.

        Returns:
            Optional[Dict[str, Any]]: The parsed function name and arguments
                if found, otherwise :obj:`None`.
        """
        function_regex = r"<function=(\w+)>(.*?)</function>"
        match = re.search(function_regex, response)

        if match:
            function_name, args_string = match.groups()
            try:
                args = json.loads(args_string)
                return {"function": function_name, "arguments": args}
            except json.JSONDecodeError as error:
                print(f"Error parsing function arguments: {error}")
                return None
        return None

    def reset(self):
        r"""Resets the :obj:`ChatAgent` to its initial state."""
        self.terminated = False
        self.init_messages()
        for terminator in self.response_terminators:
            terminator.reset()

    @property
    def system_message(self) -> Optional[BaseMessage]:
        r"""The getter method for the property :obj:`system_message`.

        Returns:
            Optional[BaseMessage]: The system message of this agent if set,
                else :obj:`None`.
        """
        return self._system_message

    @system_message.setter
    def system_message(self, message: BaseMessage) -> None:
        r"""The setter method for the property :obj:`system_message`.

        Args:
            message (BaseMessage): The message to be set as the
                new system message of this agent.
        """
        self._system_message = message

    def is_tools_added(self) -> bool:
        r"""Whether OpenAI function calling is enabled for this agent.

        Returns:
            bool: Whether OpenAI function calling is enabled for this
                agent, determined by whether the dictionary of tools
                is empty.
        """
        return len(self.func_dict) > 0

    def update_memory(
        self, message: BaseMessage, role: OpenAIBackendRole
    ) -> None:
        r"""Updates the agent memory with a new message.

        Args:
            message (BaseMessage): The new message to add to the stored
                messages.
            role (OpenAIBackendRole): The backend role type.
        """
        self.memory.write_record(
            MemoryRecord(message=message, role_at_backend=role)
        )

    def set_output_language(self, output_language: str) -> BaseMessage:
        r"""Sets the output language for the system message. This method
        updates the output language for the system message. The output
        language determines the language in which the output text should be
        generated.

        Args:
            output_language (str): The desired output language.

        Returns:
            BaseMessage: The updated system message object.
        """
        self.output_language = output_language
        language_prompt = (
            "\nRegardless of the input language, "
            f"you must output text in {output_language}."
        )
        if self.orig_sys_message is not None:
            content = self.orig_sys_message.content + language_prompt
            self._system_message = self.orig_sys_message.create_new_instance(
                content
            )
        else:
            self._system_message = BaseMessage.make_assistant_message(
                role_name="Assistant",
                content=language_prompt,
            )

        system_record = MemoryRecord(
            message=self._system_message,
            role_at_backend=OpenAIBackendRole.SYSTEM,
        )
        self.memory.clear()
        self.memory.write_record(system_record)
        return self._system_message

    def get_info(
        self,
        session_id: Optional[str],
        usage: Optional[Dict[str, int]],
        termination_reasons: List[str],
        num_tokens: int,
        tool_calls: List[FunctionCallingRecord],
        external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
    ) -> Dict[str, Any]:
        r"""Returns a dictionary containing information about the chat session.

        Args:
            session_id (str, optional): The ID of the chat session.
            usage (Dict[str, int], optional): Information about the usage of
                the LLM model.
            termination_reasons (List[str]): The reasons for the termination
                of the chat session.
            num_tokens (int): The number of tokens used in the chat session.
            tool_calls (List[FunctionCallingRecord]): The list of function
                calling records, containing the information of called tools.
            external_tool_request
                (Optional[ChatCompletionMessageToolCall], optional):
                The tool calling request of external tools from the model.
                These requests are directly returned to the user instead of
                being processed by the agent automatically.
                (default: :obj:`None`)

        Returns:
            Dict[str, Any]: The chat session information.
        """
        return {
            "id": session_id,
            "usage": usage,
            "termination_reasons": termination_reasons,
            "num_tokens": num_tokens,
            "tool_calls": tool_calls,
            "external_tool_request": external_tool_request,
        }

    def init_messages(self) -> None:
        r"""Initializes the stored messages list with the current system
        message.
        """
        if self._system_message is not None:
            system_record = MemoryRecord(
                message=self._system_message,
                role_at_backend=OpenAIBackendRole.SYSTEM,
            )
            self.memory.clear()
            self.memory.write_record(system_record)
        else:
            self.memory.clear()

    def _transform_function_calling_format(self, openai_messages: List[dict]):
        r"""Used in the deepseek-chat backend. It modifies function calling
        records' format to match the deepseek-chat backend's format."""
        from copy import deepcopy
        _messages = deepcopy(openai_messages)
        modified_messages = []
        for message in _messages:
            if message['role'] == 'function':
                new_message = {
                    'role': 'tool',
                    'tool_call_id': message['name'],
                    'content': message['content']
                }
                modified_messages.append(new_message)
            else:
                modified_messages.append(message)

        return modified_messages

    def record_message(self, message: BaseMessage) -> None:
        r"""Records the externally provided message into the agent memory as if
        it were an answer of the :obj:`ChatAgent` from the backend. Currently,
        the choice of the critic is submitted with this method.

        Args:
            message (BaseMessage): An external message to be recorded in the
                memory.
        """
        self.update_memory(message, OpenAIBackendRole.ASSISTANT)

    def step(
        self,
        input_message: Union[BaseMessage, str],
        response_format: Optional[Type[BaseModel]] = None,
    ) -> ChatAgentResponse:
        r"""Performs a single step in the chat session by generating a response
        to the input message.

        Args:
            input_message (Union[BaseMessage, str]): The input message to the
                agent. For BaseMessage input, its `role` field that specifies
                the role at backend may be either `user` or `assistant` but it
                will be set to `user` anyway since for the self agent any
                incoming message is external. For str input, the `role_name`
                would be `User`.
            response_format (Optional[Type[BaseModel]], optional): A pydantic
                model class that includes value types and field descriptions
                used to generate a structured response by LLM. This schema
                helps in defining the expected output format. (default:
                :obj:`None`)

        Returns:
            ChatAgentResponse: A struct containing the output messages,
                a boolean indicating whether the chat session has terminated,
                and information about the chat session.
        """
        from copy import deepcopy
        self.model_backend.model_config_dict = deepcopy(self.model_config_dict)
        self.tool_dict = {tool.get_function_name(): tool for tool in self._all_tools}
        if (
            self.model_backend.model_config_dict.get("response_format")
            and response_format
        ):
            raise ValueError(
                "The `response_format` parameter cannot be set both in "
                "the model configuration and in the ChatAgent step."
            )

        if isinstance(input_message, str):
            input_message = BaseMessage.make_user_message(
                role_name='User', content=input_message
            )

        if "llama" in self.model_type.lower():
            if (
                self.model_backend.model_config_dict.get("tools", None)
                and not self.tool_prompt_added
            ):
                tool_prompt = self._generate_tool_prompt(self.tool_schema_list)

                tool_sys_msg = BaseMessage.make_assistant_message(
                    role_name="Assistant",
                    content=tool_prompt,
                )

                self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
                self.tool_prompt_added = True

            self.update_memory(input_message, OpenAIBackendRole.USER)

            tool_call_records: List[FunctionCallingRecord] = []
            while True:
                # Check if token has exceeded
                try:
                    openai_messages, num_tokens = self.memory.get_context()
                except RuntimeError as e:
                    return self._step_token_exceed(
                        e.args[1], tool_call_records, "max_tokens_exceeded"
                    )
                (
                    response,
                    output_messages,
                    finish_reasons,
                    usage_dict,
                    response_id,
                ) = self._step_model_response(openai_messages, num_tokens)
                # If the model response is not a function call, meaning the
                # model has generated a message response, break the loop
                if (
                    not self.is_tools_added()
                    or not isinstance(response, ChatCompletion)
                    or "</function>" not in response.choices[0].message.content  # type: ignore[operator]
                ):
                    break

                parsed_content = self._parse_tool_response(
                    response.choices[0].message.content  # type: ignore[arg-type]
                )

                response.choices[0].message.tool_calls = [
                    ChatCompletionMessageToolCall(
                        id=str(uuid.uuid4()),
                        function=Function(
                            arguments=str(parsed_content["arguments"]).replace(
                                "'", '"'
                            ),
                            name=str(parsed_content["function"]),
                        ),
                        type="function",
                    )
                ]

                # Check for external tool call
                tool_call_request = response.choices[0].message.tool_calls[0]
                if tool_call_request.function.name in self.external_tool_names:
                    # if model calls an external tool, directly return the
                    # request
                    info = self._step_get_info(
                        output_messages,
                        finish_reasons,
                        usage_dict,
                        response_id,
                        tool_call_records,
                        num_tokens,
                        tool_call_request,
                    )
                    return ChatAgentResponse(
                        msgs=output_messages,
                        terminated=self.terminated,
                        info=info,
                    )

                # Normal function calling
                tool_call_records.append(
                    self._step_tool_call_and_update(response)
                )

            if response_format is not None:
                (
                    output_messages,
                    finish_reasons,
                    usage_dict,
                    response_id,
                    tool_call,
                    num_tokens,
                ) = self._structure_output_with_function(response_format)
                tool_call_records.append(tool_call)

            info = self._step_get_info(
                output_messages,
                finish_reasons,
                usage_dict,
                response_id,
                tool_call_records,
                num_tokens,
            )

            if len(output_messages) == 1:
                # Auto record if the output result is a single message
                self.record_message(output_messages[0])
            else:
                logger.warning(
                    "Multiple messages returned in `step()`, message won't be "
                    "recorded automatically. Please call `record_message()` "
                    "to record the selected message manually."
                )

            return ChatAgentResponse(
                msgs=output_messages, terminated=self.terminated, info=info
            )

        else:
            self.update_memory(input_message, OpenAIBackendRole.USER)
            # try:

            tool_call_records: List[FunctionCallingRecord] = []  # type: ignore[no-redef]
            while True:
                # Check if token has exceeded
                try:
                    openai_messages, num_tokens = self.memory.get_context()
                except RuntimeError as e:
                    return self._step_token_exceed(
                        e.args[1], tool_call_records, "max_tokens_exceeded"
                    )

                (
                    response,
                    output_messages,
                    finish_reasons,
                    usage_dict,
                    response_id,
                ) = self._step_model_response(openai_messages, num_tokens)
                # If the model response is not a function call, meaning the
                # model has generated a message response, break the loop
                if (
                    not self.is_tools_added()
                    or not isinstance(response, ChatCompletion)
                    or not response.choices[0].message.tool_calls
                ):
                    break

                # Check for external tool call
                tool_call_request = response.choices[0].message.tool_calls[0]

                if tool_call_request.function.name in self.external_tool_names:
                    # if model calls an external tool, directly return the
                    # request
                    info = self._step_get_info(
                        output_messages,
                        finish_reasons,
                        usage_dict,
                        response_id,
                        tool_call_records,
                        num_tokens,
                        tool_call_request,
                    )
                    return ChatAgentResponse(
                        msgs=output_messages,
                        terminated=self.terminated,
                        info=info,
                    )

                # Normal function calling
                tool_call_records.append(
                    self._step_tool_call_and_update(response)
                )

            if (
                response_format is not None
                and self.model_type.support_native_tool_calling
            ):
                (
                    output_messages,
                    finish_reasons,
                    usage_dict,
                    response_id,
                    tool_call,
                    num_tokens,
                ) = self._structure_output_with_function(response_format)
                tool_call_records.append(tool_call)

            info = self._step_get_info(
                output_messages,
                finish_reasons,
                usage_dict,
                response_id,
                tool_call_records,
                num_tokens,
            )

            if len(output_messages) == 1:
                # Auto record if the output result is a single message
                self.record_message(output_messages[0])
            else:
                logger.warning(
                    "Multiple messages returned in `step()`, message won't be "
                    "recorded automatically. Please call `record_message()` "
                    "to record the selected message manually."
                )

            return ChatAgentResponse(
                msgs=output_messages, terminated=self.terminated, info=info
            )

        # except Exception as e:
        #     logger.error(e)
        #     breakpoint()
        #     raise e

    async def step_async(
        self,
        input_message: Union[BaseMessage, str],
        response_format: Optional[Type[BaseModel]] = None,
    ) -> ChatAgentResponse:
        r"""Performs a single step in the chat session by generating a response
        to the input message. This agent step can call async function calls.

        Args:
            input_message (Union[BaseMessage, str]): The input message to the
                agent. For BaseMessage input, its `role` field that specifies
                the role at backend may be either `user` or `assistant` but it
                will be set to `user` anyway since for the self agent any
                incoming message is external. For str input, the `role_name`
                would be `User`.
            response_format (Optional[Type[BaseModel]], optional): A pydantic
                model class that includes value types and field descriptions
                used to generate a structured response by LLM. This schema
                helps in defining the expected output format. (default:
                :obj:`None`)

        Returns:
            ChatAgentResponse: A struct containing the output messages,
                a boolean indicating whether the chat session has terminated,
                and information about the chat session.
        """
        if isinstance(input_message, str):
            input_message = BaseMessage.make_user_message(
                role_name='User', content=input_message
            )

        self.update_memory(input_message, OpenAIBackendRole.USER)

        tool_call_records: List[FunctionCallingRecord] = []
        while True:
            try:
                openai_messages, num_tokens = self.memory.get_context()
            except RuntimeError as e:
                return self._step_token_exceed(
                    e.args[1], tool_call_records, "max_tokens_exceeded"
                )

            (
                response,
                output_messages,
                finish_reasons,
                usage_dict,
|
796 |
-
response_id,
|
797 |
-
) = self._step_model_response(openai_messages, num_tokens)
|
798 |
-
|
799 |
-
if (
|
800 |
-
not self.is_tools_added()
|
801 |
-
or not isinstance(response, ChatCompletion)
|
802 |
-
or response.choices[0].message.tool_calls is None
|
803 |
-
):
|
804 |
-
break
|
805 |
-
|
806 |
-
# Check for external tool call
|
807 |
-
tool_call_request = response.choices[0].message.tool_calls[0]
|
808 |
-
if tool_call_request.function.name in self.external_tool_names:
|
809 |
-
# if model calls an external tool, directly return the request
|
810 |
-
info = self._step_get_info(
|
811 |
-
output_messages,
|
812 |
-
finish_reasons,
|
813 |
-
usage_dict,
|
814 |
-
response_id,
|
815 |
-
tool_call_records,
|
816 |
-
num_tokens,
|
817 |
-
tool_call_request,
|
818 |
-
)
|
819 |
-
return ChatAgentResponse(
|
820 |
-
msgs=output_messages, terminated=self.terminated, info=info
|
821 |
-
)
|
822 |
-
|
823 |
-
# Normal function calling
|
824 |
-
tool_call_records.append(
|
825 |
-
await self._step_tool_call_and_update_async(response)
|
826 |
-
)
|
827 |
-
|
828 |
-
if (
|
829 |
-
response_format is not None
|
830 |
-
and self.model_type.support_native_tool_calling
|
831 |
-
):
|
832 |
-
(
|
833 |
-
output_messages,
|
834 |
-
finish_reasons,
|
835 |
-
usage_dict,
|
836 |
-
response_id,
|
837 |
-
tool_call_record,
|
838 |
-
num_tokens,
|
839 |
-
) = self._structure_output_with_function(response_format)
|
840 |
-
tool_call_records.append(tool_call_record)
|
841 |
-
|
842 |
-
info = self._step_get_info(
|
843 |
-
output_messages,
|
844 |
-
finish_reasons,
|
845 |
-
usage_dict,
|
846 |
-
response_id,
|
847 |
-
tool_call_records,
|
848 |
-
num_tokens,
|
849 |
-
)
|
850 |
-
|
851 |
-
if len(output_messages) == 1:
|
852 |
-
# Auto record if the output result is a single message
|
853 |
-
self.record_message(output_messages[0])
|
854 |
-
else:
|
855 |
-
logger.warning(
|
856 |
-
"Multiple messages returned in `step()`, message won't be "
|
857 |
-
"recorded automatically. Please call `record_message()` to "
|
858 |
-
"record the selected message manually."
|
859 |
-
)
|
860 |
-
|
861 |
-
return ChatAgentResponse(
|
862 |
-
msgs=output_messages, terminated=self.terminated, info=info
|
863 |
-
)
|
864 |
-
|
865 |
-
def _step_tool_call_and_update(
|
866 |
-
self, response: ChatCompletion
|
867 |
-
) -> FunctionCallingRecord:
|
868 |
-
r"""Processes a function call within the chat completion response,
|
869 |
-
records the function call in the provided list of tool calls and
|
870 |
-
updates the memory of the current agent.
|
871 |
-
|
872 |
-
Args:
|
873 |
-
response (ChatCompletion): The response object from the chat
|
874 |
-
completion.
|
875 |
-
|
876 |
-
Returns:
|
877 |
-
FunctionCallingRecord: The record of calling the function.
|
878 |
-
"""
|
879 |
-
|
880 |
-
# Perform function calling
|
881 |
-
func_assistant_msg, func_result_msg, tool_call_record = (
|
882 |
-
self.step_tool_call(response)
|
883 |
-
)
|
884 |
-
|
885 |
-
# Update the messages
|
886 |
-
self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
|
887 |
-
self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
|
888 |
-
|
889 |
-
return tool_call_record
|
890 |
-
|
891 |
-
async def _step_tool_call_and_update_async(
|
892 |
-
self, response: ChatCompletion
|
893 |
-
) -> FunctionCallingRecord:
|
894 |
-
(
|
895 |
-
func_assistant_msg,
|
896 |
-
func_result_msg,
|
897 |
-
func_record,
|
898 |
-
) = await self.step_tool_call_async(response)
|
899 |
-
|
900 |
-
self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
|
901 |
-
self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
|
902 |
-
|
903 |
-
return func_record
|
904 |
-
|
905 |
-
def _structure_output_with_function(
|
906 |
-
self, response_format: Type[BaseModel]
|
907 |
-
) -> Tuple[
|
908 |
-
List[BaseMessage],
|
909 |
-
List[str],
|
910 |
-
Dict[str, int],
|
911 |
-
str,
|
912 |
-
FunctionCallingRecord,
|
913 |
-
int,
|
914 |
-
]:
|
915 |
-
r"""Internal function of structuring the output of the agent based on
|
916 |
-
the given output schema.
|
917 |
-
|
918 |
-
Args:
|
919 |
-
response_format (Type[BaseModel]): The output schema to use for
|
920 |
-
structuring the output.
|
921 |
-
|
922 |
-
Returns:
|
923 |
-
Tuple[List[BaseMessage], List[str], Dict[str, int], str,
|
924 |
-
FunctionCallingRecord, int]:
|
925 |
-
A tuple containing the output messages, finish reasons, usage
|
926 |
-
dictionary, response ID, function calling record, and number of
|
927 |
-
tokens.
|
928 |
-
"""
|
929 |
-
from camel.toolkits import FunctionTool
|
930 |
-
|
931 |
-
schema_json = get_pydantic_object_schema(response_format)
|
932 |
-
func_str = json_to_function_code(schema_json)
|
933 |
-
func_callable = func_string_to_callable(func_str)
|
934 |
-
func = FunctionTool(func_callable)
|
935 |
-
|
936 |
-
original_func_dict = self.func_dict
|
937 |
-
original_model_dict = self.model_backend.model_config_dict
|
938 |
-
|
939 |
-
# Replace the original tools with the structuring function
|
940 |
-
self.func_dict = {func.get_function_name(): func.func}
|
941 |
-
self.tool_dict = {func.get_function_name(): func}
|
942 |
-
self.model_backend.model_config_dict = original_model_dict.copy()
|
943 |
-
self.model_backend.model_config_dict["tools"] = [
|
944 |
-
func.get_openai_tool_schema()
|
945 |
-
]
|
946 |
-
self.model_backend.model_config_dict["tool_choice"] = "required"
|
947 |
-
|
948 |
-
openai_messages, num_tokens = self.memory.get_context()
|
949 |
-
(
|
950 |
-
response,
|
951 |
-
output_messages,
|
952 |
-
finish_reasons,
|
953 |
-
usage_dict,
|
954 |
-
response_id,
|
955 |
-
) = self._step_model_response(openai_messages, num_tokens)
|
956 |
-
|
957 |
-
if isinstance(response, ChatCompletion):
|
958 |
-
tool_call_record = self._step_tool_call_and_update(response)
|
959 |
-
else:
|
960 |
-
raise ValueError(
|
961 |
-
"Structured output is not supported for stream responses."
|
962 |
-
)
|
963 |
-
|
964 |
-
for base_message_item in output_messages:
|
965 |
-
base_message_item.content = str(tool_call_record.result)
|
966 |
-
|
967 |
-
# Recover the original tools
|
968 |
-
self.func_dict = original_func_dict
|
969 |
-
self.model_backend.model_config_dict = original_model_dict
|
970 |
-
|
971 |
-
return (
|
972 |
-
output_messages,
|
973 |
-
finish_reasons,
|
974 |
-
usage_dict,
|
975 |
-
response_id,
|
976 |
-
tool_call_record,
|
977 |
-
num_tokens,
|
978 |
-
)
|
979 |
-
|
980 |
-
def _step_model_response(
|
981 |
-
self,
|
982 |
-
openai_messages: List[OpenAIMessage],
|
983 |
-
num_tokens: int,
|
984 |
-
) -> tuple[
|
985 |
-
Union[ChatCompletion, Stream],
|
986 |
-
List[BaseMessage],
|
987 |
-
List[str],
|
988 |
-
Dict[str, int],
|
989 |
-
str,
|
990 |
-
]:
|
991 |
-
r"""Internal function for agent step model response."""
|
992 |
-
|
993 |
-
response = None
|
994 |
-
# Obtain the model's response
|
995 |
-
for _ in range(len(self.model_backend.models)):
|
996 |
-
try:
|
997 |
-
response = self.model_backend.run(openai_messages)
|
998 |
-
break
|
999 |
-
except Exception as exc:
|
1000 |
-
logger.error(
|
1001 |
-
f"An error occurred while running model "
|
1002 |
-
f"{self.model_backend.model_type}, "
|
1003 |
-
f"index: {self.model_backend.current_model_index}",
|
1004 |
-
exc_info=exc,
|
1005 |
-
)
|
1006 |
-
continue
|
1007 |
-
if not response:
|
1008 |
-
raise ModelProcessingError(
|
1009 |
-
"Unable to process messages: none of the provided models "
|
1010 |
-
"run succesfully."
|
1011 |
-
)
|
1012 |
-
|
1013 |
-
# logger.debug(
|
1014 |
-
# f"Model {self.model_backend.model_type}, "
|
1015 |
-
# f"index {self.model_backend.current_model_index}, "
|
1016 |
-
# f"processed these messages: {openai_messages}"
|
1017 |
-
# )
|
1018 |
-
|
1019 |
-
if isinstance(response, ChatCompletion):
|
1020 |
-
output_messages, finish_reasons, usage_dict, response_id = (
|
1021 |
-
self.handle_batch_response(response)
|
1022 |
-
)
|
1023 |
-
else:
|
1024 |
-
output_messages, finish_reasons, usage_dict, response_id = (
|
1025 |
-
self.handle_stream_response(response, num_tokens)
|
1026 |
-
)
|
1027 |
-
return (
|
1028 |
-
response,
|
1029 |
-
output_messages,
|
1030 |
-
finish_reasons,
|
1031 |
-
usage_dict,
|
1032 |
-
response_id,
|
1033 |
-
)
|
1034 |
-
|
1035 |
-
def _step_get_info(
|
1036 |
-
self,
|
1037 |
-
output_messages: List[BaseMessage],
|
1038 |
-
finish_reasons: List[str],
|
1039 |
-
usage_dict: Dict[str, int],
|
1040 |
-
response_id: str,
|
1041 |
-
tool_calls: List[FunctionCallingRecord],
|
1042 |
-
num_tokens: int,
|
1043 |
-
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
|
1044 |
-
) -> Dict[str, Any]:
|
1045 |
-
r"""Process the output of a chat step and gather information about the
|
1046 |
-
step.
|
1047 |
-
|
1048 |
-
This method checks for termination conditions, updates the agent's
|
1049 |
-
state, and collects information about the chat step, including tool
|
1050 |
-
calls and termination reasons.
|
1051 |
-
|
1052 |
-
Args:
|
1053 |
-
output_messages (List[BaseMessage]): The messages generated in
|
1054 |
-
this step.
|
1055 |
-
finish_reasons (List[str]): The reasons for finishing the
|
1056 |
-
generation for each message.
|
1057 |
-
usage_dict (Dict[str, int]): Dictionary containing token usage
|
1058 |
-
information.
|
1059 |
-
response_id (str): The ID of the response from the model.
|
1060 |
-
tool_calls (List[FunctionCallingRecord]): Records of function calls
|
1061 |
-
made during this step.
|
1062 |
-
num_tokens (int): The number of tokens used in this step.
|
1063 |
-
external_tool_request (Optional[ChatCompletionMessageToolCall]):
|
1064 |
-
Any external tool request made during this step.
|
1065 |
-
(default::obj:`None`)
|
1066 |
-
|
1067 |
-
Returns:
|
1068 |
-
Dict[str, Any]: A dictionary containing information about the chat
|
1069 |
-
step, including termination status, reasons, and tool call
|
1070 |
-
information.
|
1071 |
-
|
1072 |
-
Note:
|
1073 |
-
This method iterates over all response terminators and checks if
|
1074 |
-
any of them signal termination. If a terminator signals
|
1075 |
-
termination, the agent's state is updated accordingly, and the
|
1076 |
-
termination reason is recorded.
|
1077 |
-
"""
|
1078 |
-
termination = [
|
1079 |
-
terminator.is_terminated(output_messages)
|
1080 |
-
for terminator in self.response_terminators
|
1081 |
-
]
|
1082 |
-
# Terminate the agent if any of the terminator terminates
|
1083 |
-
self.terminated, termination_reason = next(
|
1084 |
-
(
|
1085 |
-
(terminated, termination_reason)
|
1086 |
-
for terminated, termination_reason in termination
|
1087 |
-
if terminated
|
1088 |
-
),
|
1089 |
-
(False, None),
|
1090 |
-
)
|
1091 |
-
# For now only retain the first termination reason
|
1092 |
-
if self.terminated and termination_reason is not None:
|
1093 |
-
finish_reasons = [termination_reason] * len(finish_reasons)
|
1094 |
-
|
1095 |
-
info = self.get_info(
|
1096 |
-
response_id,
|
1097 |
-
usage_dict,
|
1098 |
-
finish_reasons,
|
1099 |
-
num_tokens,
|
1100 |
-
tool_calls,
|
1101 |
-
external_tool_request,
|
1102 |
-
)
|
1103 |
-
return info
|
1104 |
-
|
1105 |
-
def handle_batch_response(
|
1106 |
-
self, response: ChatCompletion
|
1107 |
-
) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
|
1108 |
-
r"""Process a batch response from the model and extract the necessary
|
1109 |
-
information.
|
1110 |
-
|
1111 |
-
Args:
|
1112 |
-
response (dict): Model response.
|
1113 |
-
|
1114 |
-
Returns:
|
1115 |
-
tuple: A tuple of list of output `ChatMessage`, list of
|
1116 |
-
finish reasons, usage dictionary, and response id.
|
1117 |
-
"""
|
1118 |
-
output_messages: List[BaseMessage] = []
|
1119 |
-
for choice in response.choices:
|
1120 |
-
chat_message = BaseMessage(
|
1121 |
-
role_name=self.role_name,
|
1122 |
-
role_type=self.role_type,
|
1123 |
-
meta_dict=dict(),
|
1124 |
-
content=choice.message.content or "",
|
1125 |
-
parsed=getattr(choice.message, 'parsed', None),
|
1126 |
-
)
|
1127 |
-
# Process log probabilities and append to the message meta information
|
1128 |
-
if choice.logprobs is not None:
|
1129 |
-
tokens_logprobs = choice.logprobs.content
|
1130 |
-
|
1131 |
-
if tokens_logprobs is not None:
|
1132 |
-
# Extract and structure logprob information
|
1133 |
-
logprobs_info = [
|
1134 |
-
{
|
1135 |
-
"token": token_logprob.token,
|
1136 |
-
"logprob": token_logprob.logprob,
|
1137 |
-
"top_logprobs": [
|
1138 |
-
(top_logprob.token, top_logprob.logprob)
|
1139 |
-
for top_logprob in token_logprob.top_logprobs
|
1140 |
-
],
|
1141 |
-
}
|
1142 |
-
for token_logprob in tokens_logprobs
|
1143 |
-
]
|
1144 |
-
# Ensure meta_dict exists before adding logprobs info
|
1145 |
-
if chat_message.meta_dict is None:
|
1146 |
-
chat_message.meta_dict = {}
|
1147 |
-
chat_message.meta_dict["logprobs_info"] = logprobs_info
|
1148 |
-
# Append the processed chat message to output
|
1149 |
-
output_messages.append(chat_message)
|
1150 |
-
|
1151 |
-
finish_reasons = [
|
1152 |
-
str(choice.finish_reason) for choice in response.choices
|
1153 |
-
]
|
1154 |
-
usage = (
|
1155 |
-
self._safe_model_dump(response.usage)
|
1156 |
-
if response.usage is not None
|
1157 |
-
else {}
|
1158 |
-
)
|
1159 |
-
return (
|
1160 |
-
output_messages,
|
1161 |
-
finish_reasons,
|
1162 |
-
usage,
|
1163 |
-
response.id,
|
1164 |
-
)
|
1165 |
-
|
1166 |
-
def _safe_model_dump(self, obj) -> dict:
|
1167 |
-
r"""Safely dump a Pydantic model to a dictionary.
|
1168 |
-
|
1169 |
-
This method attempts to use the `model_dump` method if available,
|
1170 |
-
otherwise it falls back to the `dict` method.
|
1171 |
-
|
1172 |
-
Args:
|
1173 |
-
obj: The Pydantic model instance to be dumped.
|
1174 |
-
|
1175 |
-
Returns:
|
1176 |
-
dict: A dictionary representation of the Pydantic model.
|
1177 |
-
"""
|
1178 |
-
# Check if the `model_dump` method exists (Pydantic v2)
|
1179 |
-
if hasattr(obj, 'model_dump'):
|
1180 |
-
return obj.model_dump()
|
1181 |
-
# Fallback to `dict()` method (Pydantic v1)
|
1182 |
-
elif hasattr(obj, 'dict'):
|
1183 |
-
return obj.dict()
|
1184 |
-
else:
|
1185 |
-
raise TypeError("The object is not a Pydantic model")
|
1186 |
-
|
1187 |
-
def handle_stream_response(
|
1188 |
-
self,
|
1189 |
-
response: Stream[ChatCompletionChunk],
|
1190 |
-
prompt_tokens: int,
|
1191 |
-
) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
|
1192 |
-
r"""Process a stream response from the model and extract the necessary
|
1193 |
-
information.
|
1194 |
-
|
1195 |
-
Args:
|
1196 |
-
response (dict): Model response.
|
1197 |
-
prompt_tokens (int): Number of input prompt tokens.
|
1198 |
-
|
1199 |
-
Returns:
|
1200 |
-
tuple: A tuple of list of output `ChatMessage`, list of
|
1201 |
-
finish reasons, usage dictionary, and response id.
|
1202 |
-
"""
|
1203 |
-
content_dict: defaultdict = defaultdict(lambda: "")
|
1204 |
-
finish_reasons_dict: defaultdict = defaultdict(lambda: "")
|
1205 |
-
output_messages: List[BaseMessage] = []
|
1206 |
-
response_id: str = ""
|
1207 |
-
# All choices in one response share one role
|
1208 |
-
for chunk in response:
|
1209 |
-
response_id = chunk.id
|
1210 |
-
for choice in chunk.choices:
|
1211 |
-
index = choice.index
|
1212 |
-
delta = choice.delta
|
1213 |
-
if delta.content is not None:
|
1214 |
-
# When response has not been stopped
|
1215 |
-
# Notice that only the first chunk_dict has the "role"
|
1216 |
-
content_dict[index] += delta.content
|
1217 |
-
if choice.finish_reason:
|
1218 |
-
finish_reasons_dict[index] = choice.finish_reason
|
1219 |
-
chat_message = BaseMessage(
|
1220 |
-
role_name=self.role_name,
|
1221 |
-
role_type=self.role_type,
|
1222 |
-
meta_dict=dict(),
|
1223 |
-
content=content_dict[index],
|
1224 |
-
)
|
1225 |
-
output_messages.append(chat_message)
|
1226 |
-
finish_reasons = [
|
1227 |
-
finish_reasons_dict[i] for i in range(len(finish_reasons_dict))
|
1228 |
-
]
|
1229 |
-
usage_dict = self.get_usage_dict(output_messages, prompt_tokens)
|
1230 |
-
return output_messages, finish_reasons, usage_dict, response_id
|
1231 |
-
|
1232 |
-
def _step_token_exceed(
|
1233 |
-
self,
|
1234 |
-
num_tokens: int,
|
1235 |
-
tool_calls: List[FunctionCallingRecord],
|
1236 |
-
termination_reason: str,
|
1237 |
-
) -> ChatAgentResponse:
|
1238 |
-
r"""Return trivial response containing number of tokens and information
|
1239 |
-
of called functions when the number of tokens exceeds.
|
1240 |
-
|
1241 |
-
Args:
|
1242 |
-
num_tokens (int): Number of tokens in the messages.
|
1243 |
-
tool_calls (List[FunctionCallingRecord]): List of information
|
1244 |
-
objects of functions called in the current step.
|
1245 |
-
termination_reason (str): String of termination reason.
|
1246 |
-
|
1247 |
-
Returns:
|
1248 |
-
ChatAgentResponse: The struct containing trivial outputs and
|
1249 |
-
information about token number and called functions.
|
1250 |
-
"""
|
1251 |
-
self.terminated = True
|
1252 |
-
output_messages: List[BaseMessage] = []
|
1253 |
-
|
1254 |
-
info = self.get_info(
|
1255 |
-
None,
|
1256 |
-
None,
|
1257 |
-
[termination_reason],
|
1258 |
-
num_tokens,
|
1259 |
-
tool_calls,
|
1260 |
-
)
|
1261 |
-
|
1262 |
-
return ChatAgentResponse(
|
1263 |
-
msgs=output_messages,
|
1264 |
-
terminated=self.terminated,
|
1265 |
-
info=info,
|
1266 |
-
)
|
1267 |
-
|
1268 |
-
def step_tool_call(
|
1269 |
-
self,
|
1270 |
-
response: ChatCompletion,
|
1271 |
-
) -> Tuple[
|
1272 |
-
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
|
1273 |
-
]:
|
1274 |
-
r"""Execute the function with arguments following the model's response.
|
1275 |
-
|
1276 |
-
Args:
|
1277 |
-
response (Dict[str, Any]): The response obtained by calling the
|
1278 |
-
model.
|
1279 |
-
|
1280 |
-
Returns:
|
1281 |
-
tuple: A tuple consisting of two obj:`FunctionCallingMessage`,
|
1282 |
-
one about the arguments and the other about the execution
|
1283 |
-
result, and a struct for logging information about this
|
1284 |
-
function call.
|
1285 |
-
"""
|
1286 |
-
choice = response.choices[0]
|
1287 |
-
if choice.message.tool_calls is None:
|
1288 |
-
raise RuntimeError("Tool call is None")
|
1289 |
-
func_name = choice.message.tool_calls[0].function.name
|
1290 |
-
|
1291 |
-
args = json.loads(choice.message.tool_calls[0].function.arguments)
|
1292 |
-
tool = self.tool_dict[func_name]
|
1293 |
-
|
1294 |
-
# ! Here, if the agent calls advanced reasoning, provide the chat history
|
1295 |
-
if func_name == "make_advanced_reasoning":
|
1296 |
-
reformed_question = f"""
|
1297 |
-
Please help an assistant to solve reasoning tasks.
|
1298 |
-
Here are the chat history between the assistant and the user, which may help you understand the intention of the user and the question:
|
1299 |
-
<chat_history>{self.memory.get_context()}</chat_history>
|
1300 |
-
|
1301 |
-
Now please answer the following question:
|
1302 |
-
<question>{args['question']}</question>
|
1303 |
-
"""
|
1304 |
-
args["question"] = reformed_question
|
1305 |
-
|
1306 |
-
result = tool(**args)
|
1307 |
-
|
1308 |
-
assist_msg = FunctionCallingMessage(
|
1309 |
-
role_name=self.role_name,
|
1310 |
-
role_type=self.role_type,
|
1311 |
-
meta_dict=None,
|
1312 |
-
content="",
|
1313 |
-
func_name=func_name,
|
1314 |
-
args=args,
|
1315 |
-
)
|
1316 |
-
func_msg = FunctionCallingMessage(
|
1317 |
-
role_name=self.role_name,
|
1318 |
-
role_type=self.role_type,
|
1319 |
-
meta_dict=None,
|
1320 |
-
content="",
|
1321 |
-
func_name=func_name,
|
1322 |
-
result=result,
|
1323 |
-
)
|
1324 |
-
|
1325 |
-
# Record information about this function call
|
1326 |
-
func_record = FunctionCallingRecord(
|
1327 |
-
func_name=func_name, args=args, result=result
|
1328 |
-
)
|
1329 |
-
return assist_msg, func_msg, func_record
|
1330 |
-
|
1331 |
-
async def step_tool_call_async(
|
1332 |
-
self,
|
1333 |
-
response: ChatCompletion,
|
1334 |
-
) -> Tuple[
|
1335 |
-
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
|
1336 |
-
]:
|
1337 |
-
r"""Execute the async function with arguments following the model's
|
1338 |
-
response.
|
1339 |
-
|
1340 |
-
Args:
|
1341 |
-
response (Dict[str, Any]): The response obtained by calling the
|
1342 |
-
model.
|
1343 |
-
|
1344 |
-
Returns:
|
1345 |
-
tuple: A tuple consisting of two obj:`FunctionCallingMessage`,
|
1346 |
-
one about the arguments and the other about the execution
|
1347 |
-
result, and a struct for logging information about this
|
1348 |
-
function call.
|
1349 |
-
"""
|
1350 |
-
# Note that when function calling is enabled, `n` is set to 1.
|
1351 |
-
choice = response.choices[0]
|
1352 |
-
if choice.message.tool_calls is None:
|
1353 |
-
raise RuntimeError("Tool call is None")
|
1354 |
-
func_name = choice.message.tool_calls[0].function.name
|
1355 |
-
|
1356 |
-
args = json.loads(choice.message.tool_calls[0].function.arguments)
|
1357 |
-
tool = self.tool_dict[func_name]
|
1358 |
-
result = await tool(**args)
|
1359 |
-
|
1360 |
-
assist_msg = FunctionCallingMessage(
|
1361 |
-
role_name=self.role_name,
|
1362 |
-
role_type=self.role_type,
|
1363 |
-
meta_dict=None,
|
1364 |
-
content="",
|
1365 |
-
func_name=func_name,
|
1366 |
-
args=args,
|
1367 |
-
)
|
1368 |
-
func_msg = FunctionCallingMessage(
|
1369 |
-
role_name=self.role_name,
|
1370 |
-
role_type=self.role_type,
|
1371 |
-
meta_dict=None,
|
1372 |
-
content="",
|
1373 |
-
func_name=func_name,
|
1374 |
-
result=result,
|
1375 |
-
)
|
1376 |
-
|
1377 |
-
# Record information about this function call
|
1378 |
-
func_record = FunctionCallingRecord(
|
1379 |
-
func_name=func_name, args=args, result=result
|
1380 |
-
)
|
1381 |
-
return assist_msg, func_msg, func_record
|
1382 |
-
|
1383 |
-
def get_usage_dict(
|
1384 |
-
self, output_messages: List[BaseMessage], prompt_tokens: int
|
1385 |
-
) -> Dict[str, int]:
|
1386 |
-
r"""Get usage dictionary when using the stream mode.
|
1387 |
-
|
1388 |
-
Args:
|
1389 |
-
output_messages (list): List of output messages.
|
1390 |
-
prompt_tokens (int): Number of input prompt tokens.
|
1391 |
-
|
1392 |
-
Returns:
|
1393 |
-
dict: Usage dictionary.
|
1394 |
-
"""
|
1395 |
-
encoding = get_model_encoding(self.model_type.value_for_tiktoken)
|
1396 |
-
completion_tokens = 0
|
1397 |
-
for message in output_messages:
|
1398 |
-
completion_tokens += len(encoding.encode(message.content))
|
1399 |
-
usage_dict = dict(
|
1400 |
-
completion_tokens=completion_tokens,
|
1401 |
-
prompt_tokens=prompt_tokens,
|
1402 |
-
total_tokens=completion_tokens + prompt_tokens,
|
1403 |
-
)
|
1404 |
-
return usage_dict
|
1405 |
-
|
1406 |
-
def add_model_scheduling_strategy(self, name: str, strategy_fn: Callable):
|
1407 |
-
r"""Add a scheduling strategy method provided by user to ModelManger.
|
1408 |
-
|
1409 |
-
Args:
|
1410 |
-
name (str): The name of the strategy.
|
1411 |
-
strategy_fn (Callable): The scheduling strategy function.
|
1412 |
-
"""
|
1413 |
-
self.model_backend.add_strategy(name, strategy_fn)
|
1414 |
-
|
1415 |
-
def __repr__(self) -> str:
|
1416 |
-
r"""Returns a string representation of the :obj:`ChatAgent`.
|
1417 |
-
|
1418 |
-
Returns:
|
1419 |
-
str: The string representation of the :obj:`ChatAgent`.
|
1420 |
-
"""
|
1421 |
-
return (
|
1422 |
-
f"ChatAgent({self.role_name}, {self.role_type}, {self.model_type})"
|
1423 |
-
)
|
owl/camel/agents/critic_agent.py
DELETED
@@ -1,202 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import random
import warnings
from typing import Any, Dict, Optional, Sequence

from colorama import Fore

from camel.agents.chat_agent import ChatAgent
from camel.memories import AgentMemory
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.responses import ChatAgentResponse
from camel.utils import get_first_int, print_text_animated

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="CriticAgent")
class CriticAgent(ChatAgent):
    r"""A class for the critic agent that assists in selecting an option.

    Args:
        system_message (BaseMessage): The system message for the critic
            agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`6`)
        retry_attempts (int, optional): The number of retry attempts if the
            critic fails to return a valid option. (default: :obj:`2`)
        verbose (bool, optional): Whether to print the critic's messages.
        logger_color (Any): The color of the menu options displayed to the
            user. (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model: Optional[BaseModelBackend] = None,
        memory: Optional[AgentMemory] = None,
        message_window_size: int = 6,
        retry_attempts: int = 2,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        super().__init__(
            system_message,
            model=model,
            memory=memory,
            message_window_size=message_window_size,
        )
        self.options_dict: Dict[str, str] = dict()
        self.retry_attempts = retry_attempts
        self.verbose = verbose
        self.logger_color = logger_color

    def flatten_options(self, messages: Sequence[BaseMessage]) -> str:
        r"""Flattens the options to the critic.

        Args:
            messages (Sequence[BaseMessage]): A list of `BaseMessage` objects.

        Returns:
            str: A string containing the flattened options to the critic.
        """
        options = [message.content for message in messages]
        flatten_options = (
            f"> Proposals from "
            f"{messages[0].role_name} ({messages[0].role_type}). "
            "Please choose an option:\n"
        )
        for index, option in enumerate(options):
            flatten_options += f"Option {index + 1}:\n{option}\n\n"
            self.options_dict[str(index + 1)] = option
        format = (
            f"Please first enter your choice ([1-{len(self.options_dict)}]) "
            "and then your explanation and comparison: "
        )
        return flatten_options + format

    def get_option(self, input_message: BaseMessage) -> str:
        r"""Gets the option selected by the critic.

        Args:
            input_message (BaseMessage): A `BaseMessage` object representing
                the input message.

        Returns:
            str: The option selected by the critic.
        """
        # TODO: Add support for editing options by the critic.
        msg_content = input_message.content
        i = 0
        while i < self.retry_attempts:
            critic_response = self.step(input_message)

            if critic_response.msgs is None or len(critic_response.msgs) == 0:
                raise RuntimeError("Got None critic messages.")
            if critic_response.terminated:
                raise RuntimeError("Critic step failed.")

            critic_msg = critic_response.msg
            if self.verbose:
                print_text_animated(
                    self.logger_color + "\n> Critic response: "
                    f"\x1b[3m{critic_msg.content}\x1b[0m\n"
                )
            choice = self.parse_critic(critic_msg)

            if choice in self.options_dict:
                return self.options_dict[choice]
            else:
                input_message = BaseMessage(
                    role_name=input_message.role_name,
                    role_type=input_message.role_type,
                    meta_dict=input_message.meta_dict,
                    content="> Invalid choice. Please choose again.\n"
                    + msg_content,
                )
            i += 1
        warnings.warn(
            "Critic failed to get a valid option "
            f"after {self.retry_attempts} attempts. "
            "Returning a random option."
        )
        return random.choice(list(self.options_dict.values()))

    def parse_critic(self, critic_msg: BaseMessage) -> Optional[str]:
        r"""Parses the critic's message and extracts the choice.

        Args:
            critic_msg (BaseMessage): A `BaseMessage` object representing the
                critic's response.

        Returns:
            Optional[str]: The critic's choice as a string, or None if the
                message could not be parsed.
        """
        choice = str(get_first_int(critic_msg.content))
        return choice

    def reduce_step(
        self,
        input_messages: Sequence[BaseMessage],
    ) -> ChatAgentResponse:
        r"""Performs one step of the conversation by flattening options to the
        critic, getting the option, and parsing the choice.

        Args:
            input_messages (Sequence[BaseMessage]): A list of BaseMessage
                objects.

        Returns:
            ChatAgentResponse: A `ChatAgentResponse` object that includes the
                critic's choice.
        """
        meta_chat_message = BaseMessage(
            role_name=input_messages[0].role_name,
            role_type=input_messages[0].role_type,
            meta_dict=input_messages[0].meta_dict,
            content="",
        )

        flatten_options = self.flatten_options(input_messages)
        if self.verbose:
            print_text_animated(
                self.logger_color + f"\x1b[3m{flatten_options}\x1b[0m\n"
            )
        input_msg = meta_chat_message.create_new_instance(flatten_options)

        option = self.get_option(input_msg)
        output_msg = meta_chat_message.create_new_instance(option)

        # TODO: The return `info` can be improved.
        return ChatAgentResponse(
            msgs=[output_msg],
            terminated=False,
            info={},
        )
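
A brief usage sketch for the deleted CriticAgent, assuming the camel package layout imported at the top of this file; the role names and proposal texts are illustrative placeholders, not part of the deleted code.

from camel.agents.critic_agent import CriticAgent
from camel.messages import BaseMessage
from camel.types import RoleType

# System message for the critic (content is illustrative).
critic_sys = BaseMessage(
    role_name="Critic",
    role_type=RoleType.CRITIC,
    meta_dict=None,
    content="You pick the most helpful proposal.",
)
critic = CriticAgent(system_message=critic_sys, verbose=True)

# Candidate proposals from another agent (placeholders).
proposals = [
    BaseMessage.make_assistant_message("Planner", "Option A: take the train."),
    BaseMessage.make_assistant_message("Planner", "Option B: rent a car."),
]

# reduce_step() flattens the proposals into a numbered menu, asks the critic
# for a choice, retries up to retry_attempts times on an invalid answer, and
# wraps the chosen option in a ChatAgentResponse.
chosen = critic.reduce_step(proposals)
print(chosen.msg.content)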
owl/camel/agents/deductive_reasoner_agent.py
DELETED
@@ -1,303 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import re
from typing import Dict, List, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.logger import get_logger
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType

logger = get_logger(__name__)

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="DeductiveReasonerAgent")
class DeductiveReasonerAgent(ChatAgent):
    r"""An agent responsible for deductive reasoning. Model of deductive
    reasoning:
        - L: A ⊕ C -> q * B
        - A represents the known starting state.
        - B represents the known target state.
        - C represents the conditions required to transition from A to B.
        - Q represents the quality or effectiveness of the transition from
          A to B.
        - L represents the path or process from A to B.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Insight Agent",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model=model)

    def deduce_conditions_and_quality(
        self,
        starting_state: str,
        target_state: str,
        role_descriptions_dict: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Union[List[str], Dict[str, str]]]:
        r"""Derives the conditions and quality from the starting state and the
        target state based on the model of the deductive reasoning and the
        knowledge base. It can optionally consider the roles involved in the
        scenario, which allows tailoring the output more closely to the AI
        agent's environment.

        Args:
            starting_state (str): The initial or starting state from which
                conditions are deduced.
            target_state (str): The target state of the task.
            role_descriptions_dict (Optional[Dict[str, str]], optional): A
                dictionary describing the roles involved in the scenario. This
                is optional and can be used to provide a context for CAMEL's
                role-playing, enabling the generation of more relevant and
                tailored conditions and quality assessments. This could be
                generated using a `RoleAssignmentAgent()` or defined manually
                by the user. (default: :obj:`None`)

        Returns:
            Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the
                extracted data from the message. The dictionary contains three
                keys:
                - 'conditions': A dictionary where each key is a condition ID
                  and each value is the corresponding condition text.
                - 'labels': A list of label strings extracted from the
                  message.
                - 'evaluate_quality': A string of quality assessment extracted
                  from the message.
        """
        self.reset()

        deduce_prompt = """You are a deductive reasoner. You are tasked to
complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the
STARTING STATE A and the TARGET STATE B. You are given the CONTEXT
CONTENT to help you complete the TASK.
Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY
fill in the BLANKs, and DO NOT alter or modify any other part of the template.

===== MODELING OF DEDUCTIVE REASONING =====
You are tasked with understanding a mathematical model based on the components
${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
- $A$ represents the known starting state.
- $B$ represents the known target state.
- $C$ represents the conditions required to transition from $A$ to $B$.
- $Q$ represents the quality or effectiveness of the transition from $A$ to
$B$.
- $L$ represents the path or process from $A$ to $B$.

===== THOUGHT OF DEDUCTIVE REASONING =====
1. Define the Parameters of A and B:
    - Characterization: Before delving into transitions, thoroughly understand
    the nature and boundaries of both $A$ and $B$. This includes the type,
    properties, constraints, and possible interactions between the two.
    - Contrast and Compare: Highlight the similarities and differences between
    $A$ and $B$. This comparative analysis will give an insight into what
    needs changing and what remains constant.
2. Historical & Empirical Analysis:
    - Previous Transitions according to the Knowledge Base of GPT: (if
    applicable) Extract conditions and patterns from the historical instances
    where a similar transition from a state comparable to $A$ moved towards
    $B$.
    - Scientific Principles: (if applicable) Consider the underlying
    scientific principles governing or related to the states and their
    transition. For example, if $A$ and $B$ are physical states, laws of
    physics might apply.
3. Logical Deduction of Conditions ($C$):
    - Direct Path Analysis: What are the immediate and direct conditions
    required to move from $A$ to $B$?
    - Intermediate States: Are there states between $A$ and $B$ that must be
    traversed or can be used to make the transition smoother or more
    efficient? If yes, what is the content?
    - Constraints & Limitations: Identify potential barriers or restrictions
    in moving from $A$ to $B$. These can be external (e.g., environmental
    factors) or internal (properties of $A$ or $B$).
    - Resource and Information Analysis: What resources and information are
    required for the transition? This could be time, entity, factor, code
    language, software platform, unknowns, etc.
    - External Influences: Consider socio-economic, political, or
    environmental factors (if applicable) that could influence the transition
    conditions.
    - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s,
    no matter how unconventional they might seem. Utilize analogies,
    metaphors, or brainstorming techniques to envision possible conditions or
    paths from $A$ to $B$.
    - The conditions $C$ should be multiple but in one sentence. And each
    condition should be concerned with one aspect/entity.
4. Entity/Label Recognition of Conditions ($C$):
    - Identify and categorize entities of Conditions ($C$) such as the names,
    locations, dates, specific technical terms or contextual parameters that
    might be associated with events, innovations post-2022.
    - The output of the entities/labels will be used as tags or labels for
    semantic similarity searches. The entities/labels may be the words, or
    phrases, each of them should contain valuable, high information entropy
    information, and should be independent.
    - Ensure that the identified entities are formatted in a manner suitable
    for database indexing and retrieval. Organize the entities into
    categories, and combine the category with its instance into a continuous
    phrase, without using colons or other separators.
    - Format these entities for database indexing: output the category rather
    than its instance/content into a continuous phrase. For example, instead
    of "Jan. 02", identify it as "Event time".
5. Quality Assessment ($Q$):
    - Efficiency: How efficient is the transition from $A$ to $B$, which
    measures the resources used versus the desired outcome?
    - Effectiveness: Did the transition achieve the desired outcome or was the
    target state achieved as intended?
    - Safety & Risks: Assess any risks associated with the transition and the
    measures to mitigate them.
    - Feedback Mechanisms: Incorporate feedback loops to continuously monitor
    and adjust the quality of transition, making it more adaptive.
6. Iterative Evaluation:
    - Test & Refine: Based on the initially deduced conditions and assessed
    quality, iterate the process to refine and optimize the transition. This
    might involve tweaking conditions, employing different paths, or changing
    resources.
    - Feedback Integration: Use feedback to make improvements and increase the
    quality of the transition.
7. Real-world scenarios often present challenges that may not be captured by
models and frameworks. While using the model, maintain an adaptive mindset:
    - Scenario Exploration: Continuously imagine various possible scenarios,
    both positive and negative, to prepare for unexpected events.
    - Flexibility: Be prepared to modify conditions ($C$) or alter the path/
    process ($L$) if unforeseen challenges arise.
    - Feedback Integration: Rapidly integrate feedback from actual
    implementations to adjust the model's application, ensuring relevancy and
    effectiveness.

===== TASK =====
Given the starting state $A$ and the target state $B$, assuming that a path
$L$ always exists between $A$ and $B$, how can one deduce or identify the
necessary conditions $C$ and the quality $Q$ of the transition?

===== STARTING STATE $A$ =====
{starting_state}

===== TARGET STATE $B$ =====
{target_state}

{role_with_description_prompt}
===== ANSWER TEMPLATE =====
- Characterization and comparison of $A$ and $B$:\n<BLANK>
- Historical & Empirical Analysis:\n<BLANK>/None
- Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
    condition <NUM>:
        <BLANK>.
- Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include
square brackets)
- Quality Assessment ($Q$) (do not use symbols):
    <BLANK>.
- Iterative Evaluation:\n<BLANK>/None"""

        if role_descriptions_dict is not None:
            role_names = role_descriptions_dict.keys()
            role_with_description_prompt = (
                "===== ROLES WITH DESCRIPTIONS =====\n"
                + "\n".join(
                    f"{role_name}:\n{role_descriptions_dict[role_name]}\n"
                    for role_name in role_names
                )
                + "\n\n"
            )
        else:
            role_with_description_prompt = ""
        deduce_prompt = TextPrompt(deduce_prompt)

        deduce = deduce_prompt.format(
            starting_state=starting_state,
            target_state=target_state,
            role_with_description_prompt=role_with_description_prompt,
        )

        conditions_and_quality_generation_msg = BaseMessage.make_user_message(
            role_name="Deductive Reasoner", content=deduce
        )

        response = self.step(
            input_message=conditions_and_quality_generation_msg
        )

        if response.terminated:
            raise RuntimeError(
                "Deduction failed. Error:\n" + f"{response.info}"
            )
        msg: BaseMessage = response.msg
        logger.info(f"Message content:\n{msg.content}")

        # Extract the conditions from the message
        conditions_dict = {
            f"condition {i}": cdt.replace("<", "")
            .replace(">", "")
            .strip()
            .strip('\n')
            for i, cdt in re.findall(
                r"condition (\d+):\s*(.+?)(?=condition \d+|- Entity)",
                msg.content,
                re.DOTALL,
            )
        }

        # Extract the labels from the message
        labels = [
            label.strip().strip('\n').strip("\"'")
            for label in re.findall(
                r"Entity/Label Recognition of Conditions:\n\[(.+?)\]",
                msg.content,
                re.DOTALL,
            )[0].split(",")
        ]

        # Extract the quality from the message
        quality = next(
            q.strip().strip('\n')
            for q in re.findall(
                r"Quality Assessment \(\$Q\$\) \(do not use symbols\):"
                r"\n(.+?)- Iterative",
                msg.content,
                re.DOTALL,
            )
        )

        # Convert them into JSON format
        conditions_and_quality_json: Dict[
            str, Union[List[str], Dict[str, str]]
        ] = {}
        conditions_and_quality_json["conditions"] = conditions_dict
        conditions_and_quality_json["labels"] = labels
        conditions_and_quality_json["evaluate_quality"] = quality

        return conditions_and_quality_json
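
A short sketch of the deleted DeductiveReasonerAgent in use; the two states are illustrative placeholders. The returned dictionary carries the keys "conditions", "labels", and "evaluate_quality", exactly as assembled at the end of deduce_conditions_and_quality().

from camel.agents.deductive_reasoner_agent import DeductiveReasonerAgent

reasoner = DeductiveReasonerAgent()

# Illustrative starting and target states.
result = reasoner.deduce_conditions_and_quality(
    starting_state="A web page of raw, unlabeled product reviews",
    target_state="A CSV file of the reviews labeled by sentiment",
)

print(result["conditions"])        # e.g. {"condition 1": "...", ...}
print(result["labels"])            # entity/label strings for semantic search
print(result["evaluate_quality"])  # free-text quality assessment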
owl/camel/agents/embodied_agent.py
DELETED
@@ -1,201 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, List, Optional

from colorama import Fore

from camel.agents.chat_agent import ChatAgent
from camel.agents.tool_agents.base import BaseToolAgent
from camel.interpreters import (
    BaseInterpreter,
    InternalPythonInterpreter,
    SubprocessInterpreter,
)
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.responses import ChatAgentResponse
from camel.utils import print_text_animated

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="EmbodiedAgent")
class EmbodiedAgent(ChatAgent):
    r"""Class for managing conversations of CAMEL Embodied Agents.

    Args:
        system_message (BaseMessage): The system message for the chat agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        tool_agents (List[BaseToolAgent], optional): The tool agents to use in
            the embodied agent. (default: :obj:`None`)
        code_interpreter (BaseInterpreter, optional): The code interpreter to
            execute code. If `code_interpreter` and `tool_agents` are both
            `None`, default to `SubprocessInterpreter`. If `code_interpreter`
            is `None` and `tool_agents` is not `None`, default to
            `InternalPythonInterpreter`. (default: :obj:`None`)
        verbose (bool, optional): Whether to print the agent's messages.
        logger_color (Any): The color of the logger displayed to the user.
            (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model: Optional[BaseModelBackend] = None,
        message_window_size: Optional[int] = None,
        tool_agents: Optional[List[BaseToolAgent]] = None,
        code_interpreter: Optional[BaseInterpreter] = None,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        self.tool_agents = tool_agents
        self.code_interpreter: BaseInterpreter
        if code_interpreter is not None:
            self.code_interpreter = code_interpreter
        elif self.tool_agents:
            self.code_interpreter = InternalPythonInterpreter()
        else:
            self.code_interpreter = SubprocessInterpreter()

        if self.tool_agents:
            system_message = self._set_tool_agents(system_message)
        self.verbose = verbose
        self.logger_color = logger_color
        super().__init__(
            system_message=system_message,
            model=model,
            message_window_size=message_window_size,
        )

    def _set_tool_agents(self, system_message: BaseMessage) -> BaseMessage:
        action_space_prompt = self._get_tool_agents_prompt()
        result_message = system_message.create_new_instance(
            content=system_message.content.format(
                action_space=action_space_prompt
            )
        )
        if self.tool_agents is not None:
            self.code_interpreter.update_action_space(
                {tool.name: tool for tool in self.tool_agents}
            )
        return result_message

    def _get_tool_agents_prompt(self) -> str:
        r"""Returns the action space prompt.

        Returns:
            str: The action space prompt.
        """
        if self.tool_agents is not None:
            return "\n".join(
                [
                    f"*** {tool.name} ***:\n {tool.description}"
                    for tool in self.tool_agents
                ]
            )
        else:
            return ""

    def get_tool_agent_names(self) -> List[str]:
        r"""Returns the names of tool agents.

        Returns:
            List[str]: The names of tool agents.
        """
        if self.tool_agents is not None:
            return [tool.name for tool in self.tool_agents]
        else:
            return []

    # ruff: noqa: E501
    def step(self, input_message: BaseMessage) -> ChatAgentResponse:  # type: ignore[override]
        r"""Performs a step in the conversation.

        Args:
            input_message (BaseMessage): The input message.

        Returns:
            ChatAgentResponse: A struct containing the output messages,
                a boolean indicating whether the chat session has terminated,
                and information about the chat session.
        """
        response = super().step(input_message)

        if response.msgs is None or len(response.msgs) == 0:
            raise RuntimeError("Got None output messages.")
        if response.terminated:
            raise RuntimeError(f"{self.__class__.__name__} step failed.")

        # NOTE: Only single output messages are supported
        explanations, codes = response.msg.extract_text_and_code_prompts()

        if self.verbose:
            for explanation, code in zip(explanations, codes):
                print_text_animated(
                    self.logger_color + f"> Explanation:\n{explanation}"
                )
                print_text_animated(self.logger_color + f"> Code:\n{code}")

            if len(explanations) > len(codes):
                print_text_animated(
                    self.logger_color + f"> Explanation:\n{explanations[-1]}"
                )

        content = response.msg.content

        if codes is not None:
            try:
                content = "\n> Executed Results:\n"
                for block_idx, code in enumerate(codes):
                    executed_output = self.code_interpreter.run(
                        code, code.code_type
                    )
                    content += (
                        f"Executing code block {block_idx}: {{\n"
                        + executed_output
                        + "}\n"
                    )
            except InterruptedError as e:
                content = (
                    f"\n> Running code failed: {e}\n"
                    "Please regenerate the code."
                )

        # TODO: Handle errors
        content = input_message.content + f"\n> Embodied Actions:\n{content}"
        message = BaseMessage(
            input_message.role_name,
            input_message.role_type,
            input_message.meta_dict,
            content,
        )
        return ChatAgentResponse(
            msgs=[message],
            terminated=response.terminated,
            info=response.info,
        )
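For reference, a minimal usage sketch of the removed EmbodiedAgent. Illustrative only: the messages are hypothetical, a default model backend is assumed, and with no `tool_agents` passed the agent falls back to the SubprocessInterpreter, so generated code runs locally.

from camel.agents.embodied_agent import EmbodiedAgent
from camel.messages import BaseMessage

sys_msg = BaseMessage.make_assistant_message(
    role_name="Coder",
    content="You write and run Python code to solve the user's task.",
)
agent = EmbodiedAgent(system_message=sys_msg, verbose=True)

user_msg = BaseMessage.make_user_message(
    role_name="User",
    content="Print the first five square numbers.",
)
response = agent.step(user_msg)
print(response.msg.content)  # original content plus "> Embodied Actions:" with executed results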
owl/camel/agents/knowledge_graph_agent.py
DELETED
@@ -1,259 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import TYPE_CHECKING, Optional, Union

if TYPE_CHECKING:
    from unstructured.documents.elements import Element

from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.storages.graph_storages.graph_element import (
    GraphElement,
    Node,
    Relationship,
)
from camel.types import RoleType

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


text_prompt = """
You are tasked with extracting nodes and relationships from given content and
structuring them into Node and Relationship objects. Here's the outline of
what you need to do:

Content Extraction:
You should be able to process input content and identify entities mentioned
within it.
Entities can be any noun phrases or concepts that represent distinct entities
in the context of the given content.

Node Extraction:
For each identified entity, you should create a Node object.
Each Node object should have a unique identifier (id) and a type (type).
Additional properties associated with the node can also be extracted and
stored.

Relationship Extraction:
You should identify relationships between entities mentioned in the content.
For each relationship, create a Relationship object.
A Relationship object should have a subject (subj) and an object (obj) which
are Node objects representing the entities involved in the relationship.
Each relationship should also have a type (type), and additional properties if
applicable.

Output Formatting:
The extracted nodes and relationships should be formatted as instances of the
provided Node and Relationship classes.
Ensure that the extracted data adheres to the structure defined by the classes.
Output the structured data in a format that can be easily validated against
the provided code.

Instructions for you:
Read the provided content thoroughly.
Identify distinct entities mentioned in the content and categorize them as
nodes.
Determine relationships between these entities and represent them as directed
relationships.
Provide the extracted nodes and relationships in the specified format below.
Example for you:

Example Content:
"John works at XYZ Corporation. He is a software engineer. The company is
located in New York City."

Expected Output:

Nodes:

Node(id='John', type='Person')
Node(id='XYZ Corporation', type='Organization')
Node(id='New York City', type='Location')

Relationships:

Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
Corporation', type='Organization'), type='WorksAt')
Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
type='Location'), type='ResidesIn')

===== TASK =====
Please extract nodes and relationships from the given content and structure
them into Node and Relationship objects.

{task}
"""


@track_agent(name="KnowledgeGraphAgent")
class KnowledgeGraphAgent(ChatAgent):
    r"""An agent that can extract node and relationship information for
    different entities from given `Element` content.

    Attributes:
        task_prompt (TextPrompt): A prompt for the agent to extract node and
            relationship information for different entities.
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        r"""Initialize the `KnowledgeGraphAgent`.

        Args:
            model (BaseModelBackend, optional): The model backend to use for
                generating responses. (default: :obj:`OpenAIModel` with
                `GPT_4O_MINI`)
        """
        system_message = BaseMessage(
            role_name="Graphify",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="Your mission is to transform unstructured content "
            "into structured graph data. Extract nodes and relationships with "
            "precision, and let the connections unfold. Your graphs will "
            "illuminate the hidden connections within the chaos of "
            "information.",
        )
        super().__init__(system_message, model=model)

    def run(
        self,
        element: "Element",
        parse_graph_elements: bool = False,
    ) -> Union[str, GraphElement]:
        r"""Run the agent to extract node and relationship information.

        Args:
            element (Element): The input element.
            parse_graph_elements (bool, optional): Whether to parse into
                `GraphElement`. Defaults to `False`.

        Returns:
            Union[str, GraphElement]: The extracted node and relationship
                information. If `parse_graph_elements` is `True` then return
                `GraphElement`, else return `str`.
        """
        self.reset()
        self.element = element

        knowledge_graph_prompt = TextPrompt(text_prompt)
        knowledge_graph_generation = knowledge_graph_prompt.format(
            task=str(element)
        )

        knowledge_graph_generation_msg = BaseMessage.make_user_message(
            role_name="Graphify", content=knowledge_graph_generation
        )

        response = self.step(input_message=knowledge_graph_generation_msg)

        content = response.msg.content

        if parse_graph_elements:
            content = self._parse_graph_elements(content)

        return content

    def _validate_node(self, node: Node) -> bool:
        r"""Validate if the object is a valid Node.

        Args:
            node (Node): Object to be validated.

        Returns:
            bool: True if the object is a valid Node, False otherwise.
        """
        return (
            isinstance(node, Node)
            and isinstance(node.id, (str, int))
            and isinstance(node.type, str)
        )

    def _validate_relationship(self, relationship: Relationship) -> bool:
        r"""Validate if the object is a valid Relationship.

        Args:
            relationship (Relationship): Object to be validated.

        Returns:
            bool: True if the object is a valid Relationship, False otherwise.
        """
        return (
            isinstance(relationship, Relationship)
            and self._validate_node(relationship.subj)
            and self._validate_node(relationship.obj)
            and isinstance(relationship.type, str)
        )

    def _parse_graph_elements(self, input_string: str) -> GraphElement:
        r"""Parses graph elements from given content.

        Args:
            input_string (str): The input content.

        Returns:
            GraphElement: The parsed graph elements.
        """
        import re

        # Regular expressions to extract nodes and relationships
        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
        rel_pattern = (
            r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
        )

        nodes = {}
        relationships = []

        # Extract nodes
        for match in re.finditer(node_pattern, input_string):
            id, type = match.groups()
            properties = {'source': 'agent_created'}
            if id not in nodes:
                node = Node(id=id, type=type, properties=properties)
                if self._validate_node(node):
                    nodes[id] = node

        # Extract relationships
        for match in re.finditer(rel_pattern, input_string):
            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
            properties = {'source': 'agent_created'}
            if subj_id in nodes and obj_id in nodes:
                subj = nodes[subj_id]
                obj = nodes[obj_id]
                relationship = Relationship(
                    subj=subj, obj=obj, type=rel_type, properties=properties
                )
                if self._validate_relationship(relationship):
                    relationships.append(relationship)

        return GraphElement(
            nodes=list(nodes.values()),
            relationships=relationships,
            source=self.element,
        )
owl/camel/agents/role_assignment_agent.py
DELETED
@@ -1,141 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import re
from typing import Dict, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="RoleAssignmentAgent")
class RoleAssignmentAgent(ChatAgent):
    r"""An agent that generates role names based on the task prompt.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)

    Attributes:
        role_assignment_prompt (TextPrompt): A prompt for the agent to generate
            role names.
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Role Assigner",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model=model)

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
        num_roles: int = 2,
    ) -> Dict[str, str]:
        r"""Generate role names based on the input task prompt.

        Args:
            task_prompt (Union[str, TextPrompt]): The prompt
                for the task based on which the roles are to be generated.
            num_roles (int, optional): The number of roles to generate.
                (default: :obj:`2`)

        Returns:
            Dict[str, str]: A dictionary mapping role names to their
                descriptions.
        """
        self.reset()

        expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join(
            f"Domain expert {i + 1}: <BLANK>\n"
            f"Associated competencies, characteristics, duties "
            f"and workflows: <BLANK>. End."
            for i in range(num_roles or 0)
        )
        role_assignment_generation_prompt = TextPrompt(
            "You are a role assignment agent, and you're in charge of "
            + "recruiting {num_roles} experts for the following task."
            + "\n==== TASK =====\n {task}\n\n"
            + "Identify the domain experts you'd recruit and detail their "
            + "associated competencies, characteristics, duties and workflows "
            + "to complete the task.\n "
            + "Your answer MUST adhere to the format of ANSWER PROMPT, and "
            + "ONLY answer the BLANKs.\n"
            + expert_prompt
        )
        role_assignment_generation = role_assignment_generation_prompt.format(
            num_roles=num_roles, task=task_prompt
        )

        role_assignment_generation_msg = BaseMessage.make_user_message(
            role_name="Role Assigner", content=role_assignment_generation
        )

        response = self.step(input_message=role_assignment_generation_msg)

        msg = response.msg  # type: BaseMessage
        terminated = response.terminated

        # Distribute the output completions into role names and descriptions
        role_names = [
            desc.replace("<|", "").replace("|>", "")
            for desc in re.findall(
                r"Domain expert \d: (.+?)\nAssociated competencies,",
                msg.content,
                re.DOTALL,
            )
        ]
        role_descriptions = [
            desc.replace("<|", "").replace("|>", "")
            for desc in re.findall(
                r"Associated competencies, characteristics, "
                r"duties and workflows: (.+?) End.",
                msg.content,
                re.DOTALL,
            )
        ]

        if len(role_names) != num_roles or len(role_descriptions) != num_roles:
            raise RuntimeError(
                "Got None or insufficient information of roles."
            )
        if terminated:
            raise RuntimeError("Role assignment failed.")

        role_descriptions_dict = {
            role_name: description
            for role_name, description in zip(role_names, role_descriptions)
        }

        return role_descriptions_dict
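For reference, a minimal usage sketch of the removed RoleAssignmentAgent; the task string is a hypothetical example, and a configured default model backend is assumed:

from camel.agents.role_assignment_agent import RoleAssignmentAgent

agent = RoleAssignmentAgent()
roles = agent.run(
    task_prompt="Design a web crawler for academic papers",
    num_roles=2,
)
for role_name, description in roles.items():
    print(role_name, "->", description)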
owl/camel/agents/search_agent.py
DELETED
@@ -1,133 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Optional

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import TextPrompt
from camel.types import RoleType
from camel.utils import create_chunks

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="SearchAgent")
class SearchAgent(ChatAgent):
    r"""An agent that summarizes text based on a query and evaluates the
    relevance of an answer.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Assistant",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful assistant.",
        )
        super().__init__(system_message, model=model)

    def summarize_text(self, text: str, query: str) -> str:
        r"""Summarize the information from the text, based on the query.

        Args:
            text (str): Text to summarize.
            query (str): What information you want.

        Returns:
            str: A string with the gathered information.
        """
        self.reset()

        summary_prompt = TextPrompt(
            '''Gather information from this text that is relevant to the
question, but do not directly answer the question.\nquestion:
{query}\ntext '''
        )
        summary_prompt = summary_prompt.format(query=query)
        # Max length of each chunk
        max_len = 3000
        results = ""
        chunks = create_chunks(text, max_len)
        # Summarize
        for i, chunk in enumerate(chunks, start=1):
            prompt = summary_prompt + str(i) + ": " + chunk
            user_msg = BaseMessage.make_user_message(
                role_name="User",
                content=prompt,
            )
            result = self.step(user_msg).msg.content
            results += result + "\n"

        # Final summarization
        final_prompt = TextPrompt(
            '''Here are some summarized texts split from one text. Use
the information to answer the question. If you can't find the answer,
you must answer "I can not find the answer to the query" and
explain why.\n Query:\n{query}.\n\nText:\n'''
        )
        final_prompt = final_prompt.format(query=query)
        prompt = final_prompt + results

        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content

        return response

    def continue_search(self, query: str, answer: str) -> bool:
        r"""Ask whether to continue searching or not based on the provided
        answer.

        Args:
            query (str): The question.
            answer (str): The answer to the question.

        Returns:
            bool: `True` if the search should continue, `False`
                otherwise.
        """
        prompt = TextPrompt(
            "Do you think the ANSWER can answer the QUERY? "
            "Use only 'yes' or 'no' to answer.\n"
            "===== QUERY =====\n{query}\n\n"
            "===== ANSWER =====\n{answer}"
        )
        prompt = prompt.format(query=query, answer=answer)
        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content
        if "yes" in str(response).lower():
            return False
        return True
owl/camel/agents/task_agent.py
DELETED
@@ -1,410 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, Dict, List, Optional, Union

from camel.agents.chat_agent import ChatAgent
from camel.messages import BaseMessage
from camel.models import BaseModelBackend
from camel.prompts import PromptTemplateGenerator, TextPrompt
from camel.types import RoleType, TaskType
from camel.utils import get_task_list

# AgentOps decorator setting
try:
    import os

    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import track_agent
    else:
        raise ImportError
except (ImportError, AttributeError):
    from camel.utils import track_agent


@track_agent(name="TaskSpecifyAgent")
class TaskSpecifyAgent(ChatAgent):
    r"""An agent that specifies a given task prompt by prompting the user to
    provide more details.

    Attributes:
        DEFAULT_WORD_LIMIT (int): The default word limit for the task prompt.
        task_specify_prompt (TextPrompt): The prompt for specifying the task.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        task_type (TaskType, optional): The type of task for which to generate
            a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
        task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
            specifying the task. (default: :obj:`None`)
        word_limit (int, optional): The word limit for the task prompt.
            (default: :obj:`50`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
    """

    DEFAULT_WORD_LIMIT = 50

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
        task_type: TaskType = TaskType.AI_SOCIETY,
        task_specify_prompt: Optional[Union[str, TextPrompt]] = None,
        word_limit: int = DEFAULT_WORD_LIMIT,
        output_language: Optional[str] = None,
    ) -> None:
        self.task_specify_prompt: Union[str, TextPrompt]
        if task_specify_prompt is None:
            task_specify_prompt_template = (
                PromptTemplateGenerator().get_task_specify_prompt(task_type)
            )

            self.task_specify_prompt = task_specify_prompt_template.format(
                word_limit=word_limit
            )
        else:
            self.task_specify_prompt = TextPrompt(task_specify_prompt)

        system_message = BaseMessage(
            role_name="Task Specifier",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You can make a task more specific.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
        )

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
        meta_dict: Optional[Dict[str, Any]] = None,
    ) -> TextPrompt:
        r"""Specify the given task prompt by providing more details.

        Args:
            task_prompt (Union[str, TextPrompt]): The original task
                prompt.
            meta_dict (Dict[str, Any], optional): A dictionary containing
                additional information to include in the prompt.
                (default: :obj:`None`)

        Returns:
            TextPrompt: The specified task prompt.
        """
        self.reset()
        task_specify_prompt = self.task_specify_prompt.format(task=task_prompt)

        if meta_dict is not None:
            task_specify_prompt = task_specify_prompt.format(**meta_dict)
        task_msg = BaseMessage.make_user_message(
            role_name="Task Specifier", content=task_specify_prompt
        )
        specifier_response = self.step(task_msg)

        if specifier_response.terminated:
            raise RuntimeError("Task specification failed.")
        if len(specifier_response.msgs) == 0:
            raise RuntimeError("Got no specification message.")

        specified_task_msg = specifier_response.msgs[0]

        return TextPrompt(specified_task_msg.content)


@track_agent(name="TaskPlannerAgent")
class TaskPlannerAgent(ChatAgent):
    r"""An agent that helps divide a task into subtasks based on the input
    task prompt.

    Attributes:
        task_planner_prompt (TextPrompt): A prompt for the agent to divide
            the task into subtasks.

    Args:
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
    """

    def __init__(
        self,
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
    ) -> None:
        self.task_planner_prompt = TextPrompt(
            "Divide this task into subtasks: {task}. Be concise."
        )
        system_message = BaseMessage(
            role_name="Task Planner",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task planner.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
        )

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
    ) -> TextPrompt:
        r"""Generate subtasks based on the input task prompt.

        Args:
            task_prompt (Union[str, TextPrompt]): The prompt for the task to
                be divided into subtasks.

        Returns:
            TextPrompt: A prompt for the subtasks generated by the agent.
        """
        # TODO: Maybe include roles information.
        self.reset()
        task_planner_prompt = self.task_planner_prompt.format(task=task_prompt)

        task_msg = BaseMessage.make_user_message(
            role_name="Task Planner", content=task_planner_prompt
        )

        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task planning failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task planning message.")

        sub_tasks_msg = task_response.msgs[0]
        return TextPrompt(sub_tasks_msg.content)


@track_agent(name="TaskCreationAgent")
class TaskCreationAgent(ChatAgent):
    r"""An agent that helps create new tasks based on the objective
    and last completed task. Compared to :obj:`TaskPlannerAgent`,
    it's still a task planner, but it has more context information
    like the last task and the incomplete task list. Modified from
    `BabyAGI <https://github.com/yoheinakajima/babyagi>`_.

    Attributes:
        task_creation_prompt (TextPrompt): A prompt for the agent to
            create new tasks.

    Args:
        role_name (str): The role name of the Agent to create the task.
        objective (Union[str, TextPrompt]): The objective of the Agent to
            perform the task.
        model (BaseModelBackend, optional): The LLM backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        max_task_num (int, optional): The maximum number of planned
            tasks in one round. (default: :obj:`3`)
    """

    def __init__(
        self,
        role_name: str,
        objective: Union[str, TextPrompt],
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
        message_window_size: Optional[int] = None,
        max_task_num: Optional[int] = 3,
    ) -> None:
        task_creation_prompt = TextPrompt(
            """Create a new task with the following objective: {objective}.
Never forget you are a Task Creator of {role_name}.
You must instruct me based on my expertise and your needs to solve the task.
You should consider past solved tasks and in-progress tasks: {task_list}.
The newly created tasks must not overlap with these past tasks.
The result must be a numbered list in the format:

    #. First Task
    #. Second Task
    #. Third Task

You can only give me up to {max_task_num} tasks at a time. \
Each task should be concise, concrete and doable for a {role_name}.
You should make a task plan and not ask me questions.
If you think no new tasks are needed right now, write "No tasks to add."
Now start to give me new tasks one by one. No more than three tasks.
Be concrete.
"""
        )

        self.task_creation_prompt = task_creation_prompt.format(
            objective=objective, role_name=role_name, max_task_num=max_task_num
        )
        self.objective = objective

        system_message = BaseMessage(
            role_name="Task Creator",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task creator.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
            message_window_size=message_window_size,
        )

    def run(
        self,
        task_list: List[str],
    ) -> List[str]:
        r"""Generate subtasks based on the previous task results and
        incomplete task list.

        Args:
            task_list (List[str]): The completed or in-progress
                tasks which should not overlap with newly created tasks.

        Returns:
            List[str]: The new task list generated by the Agent.
        """

        if len(task_list) > 0:
            task_creation_prompt = self.task_creation_prompt.format(
                task_list=task_list
            )
        else:
            task_creation_prompt = self.task_creation_prompt.format(
                task_list=""
            )

        task_msg = BaseMessage.make_user_message(
            role_name="Task Creator", content=task_creation_prompt
        )
        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task creation failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task creation message.")

        sub_tasks_msg = task_response.msgs[0]
        return get_task_list(sub_tasks_msg.content)


@track_agent(name="TaskPrioritizationAgent")
class TaskPrioritizationAgent(ChatAgent):
    r"""An agent that helps re-prioritize the task list and
    returns a numbered prioritized list. Modified from
    `BabyAGI <https://github.com/yoheinakajima/babyagi>`_.

    Attributes:
        task_prioritization_prompt (TextPrompt): A prompt for the agent to
            prioritize tasks.

    Args:
        objective (Union[str, TextPrompt]): The objective of the Agent to
            perform the task.
        model (BaseModelBackend, optional): The LLM backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
    """

    def __init__(
        self,
        objective: Union[str, TextPrompt],
        model: Optional[BaseModelBackend] = None,
        output_language: Optional[str] = None,
        message_window_size: Optional[int] = None,
    ) -> None:
        task_prioritization_prompt = TextPrompt(
            """Prioritize the following tasks: {task_list}.
Consider your ultimate objective: {objective}.
Tasks should be sorted from highest to lowest priority, where higher-priority \
tasks are those that act as pre-requisites or are more essential for meeting \
the objective. Return one task per line in your response.
Do not remove or modify any tasks.
The result must be a numbered list in the format:

    #. First task
    #. Second task

The entries must be consecutively numbered, starting with 1.
The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list \
with any other output."""
        )

        self.task_prioritization_prompt = task_prioritization_prompt.format(
            objective=objective
        )
        self.objective = objective

        system_message = BaseMessage(
            role_name="Task Prioritizer",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful task prioritizer.",
        )

        super().__init__(
            system_message,
            model=model,
            output_language=output_language,
            message_window_size=message_window_size,
        )

    def run(
        self,
        task_list: List[str],
    ) -> List[str]:
        r"""Prioritize the task list given the agent objective.

        Args:
            task_list (List[str]): The unprioritized tasks of the agent.

        Returns:
            List[str]: The new prioritized task list generated by the Agent.
        """
        task_prioritization_prompt = self.task_prioritization_prompt.format(
            task_list=task_list
        )

        task_msg = BaseMessage.make_user_message(
            role_name="Task Prioritizer", content=task_prioritization_prompt
        )

        task_response = self.step(task_msg)

        if task_response.terminated:
            raise RuntimeError("Task prioritization failed.")
        if len(task_response.msgs) == 0:
            raise RuntimeError("Got no task prioritization message.")

        sub_tasks_msg = task_response.msgs[0]
        return get_task_list(sub_tasks_msg.content)
owl/camel/agents/tool_agents/__init__.py
DELETED
@@ -1,20 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from .base import BaseToolAgent
from .hugging_face_tool_agent import HuggingFaceToolAgent

__all__ = [
    'BaseToolAgent',
    'HuggingFaceToolAgent',
]
owl/camel/agents/tool_agents/__pycache__/__init__.cpython-311.pyc
DELETED
Binary file (383 Bytes)
owl/camel/agents/tool_agents/__pycache__/base.cpython-311.pyc
DELETED
Binary file (1.57 kB)
owl/camel/agents/tool_agents/__pycache__/hugging_face_tool_agent.cpython-311.pyc
DELETED
Binary file (10 kB)
owl/camel/agents/tool_agents/base.py
DELETED
@@ -1,39 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from camel.agents import BaseAgent


class BaseToolAgent(BaseAgent):
    r"""Creates a :obj:`BaseToolAgent` object with the specified name and
    description.

    Args:
        name (str): The name of the tool agent.
        description (str): The description of the tool agent.
    """

    def __init__(self, name: str, description: str) -> None:
        self.name = name
        self.description = description

    def reset(self) -> None:
        r"""Resets the agent to its initial state."""
        pass

    def step(self) -> None:
        r"""Performs a single step of the agent."""
        pass

    def __str__(self) -> str:
        return f"{self.name}: {self.description}"
owl/camel/agents/tool_agents/hugging_face_tool_agent.py
DELETED
@@ -1,206 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, Optional

from camel.agents.tool_agents.base import BaseToolAgent


# flake8: noqa :E501
class HuggingFaceToolAgent(BaseToolAgent):
    r"""Tool agent for calling HuggingFace models. This agent is a wrapper
    around agents from the `transformers` library. For more information
    about the available models, please see the `transformers` documentation
    at https://huggingface.co/docs/transformers/transformers_agents.

    Args:
        name (str): The name of the agent.
        *args (Any): Additional positional arguments to pass to the underlying
            Agent class.
        remote (bool, optional): Flag indicating whether to run the agent
            remotely. (default: :obj:`True`)
        **kwargs (Any): Additional keyword arguments to pass to the underlying
            Agent class.
    """

    def __init__(
        self,
        name: str,
        *args: Any,
        remote: bool = True,
        **kwargs: Any,
    ) -> None:
        try:
            # TODO: Support other tool agents
            import transformers
            from packaging import version

            if version.parse(transformers.__version__) < version.parse(
                "4.31.0"
            ):
                raise ValueError(
                    "The version of \"transformers\" package should be >= 4.31.0"
                )

            from transformers.tools import OpenAiAgent
            from transformers.tools.agent_types import AgentImage
        except (ImportError, ValueError):
            raise ValueError(
                "Could not import transformers tool agents. "
                "Please set up the environment with "
                "pip install huggingface_hub==0.14.1 transformers==4.31.0 diffusers accelerate==0.20.3 datasets torch soundfile sentencepiece opencv-python"
            )
        self.agent_image_type = AgentImage
        self.agent = OpenAiAgent(*args, **kwargs)
        description = f"""The `{name}` is a tool agent that can perform a variety of tasks including:
- Document question answering: given a document (such as a PDF) in image format, answer a question on this document
- Text question answering: given a long text and a question, answer the question in the text
- Unconditional image captioning: Caption the image!
- Image question answering: given an image, answer a question on this image
- Image segmentation: given an image and a prompt, output the segmentation mask of that prompt
- Speech to text: given an audio recording of a person talking, transcribe the speech into text
- Text to speech: convert text to speech
- Zero-shot text classification: given a text and a list of labels, identify to which label the text corresponds the most
- Text summarization: summarize a long text in one or a few sentences
- Translation: translate the text into a given language
- Text downloading: to download a text from a web URL
- Text to image: generate an image according to a prompt, leveraging stable diffusion
- Image transformation: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion
- Text to video: generate a small video according to a prompt

Here are some python code examples of what you can do with this agent:

Single execution (step) mode, the single execution method is when using the step() method of the agent:
```
# Text to image
rivers_and_lakes_image = {name}.step("Draw me a picture of rivers and lakes.")
rivers_and_lakes_image.save("./rivers_and_lakes_image.png")

# Text to image -> Image transformation
sea_add_island_image = {name}.step("Draw me a picture of the sea then transform the picture to add an island")
sea_add_island_image.save("./sea_add_island_image.png")

# If you'd like to keep a state across executions or to pass non-text objects to the agent,
# you can do so by specifying variables that you would like the agent to use. For example,
# you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following:
picture = {name}.step("Generate a picture of rivers and lakes.")
picture.save("./picture.png")
updated_picture = {name}.step("Transform the image in `picture` to add an island to it.", picture=picture)
updated_picture.save("./updated_picture.png")

capybara_sea_image = {name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
capybara_sea_image.save("./capybara_sea_image.png")

# Document question answering
answer = {name}.step(
    "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?",
    document=document,
)
print(answer)


# Text to image
boat_image = {name}.step("Generate an image of a boat in the water")
boat_image.save("./boat_image.png")

# Unconditional image captioning
boat_image_caption = {name}.step("Can you caption the `boat_image`?", boat_image=boat_image)
print(boat_image_caption)

# Text to image -> Unconditional image captioning -> Text to speech
boat_audio = {name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards")

# Text downloading
document = {name}.step("Download the text from http://hf.co")
print(document)

# Text summarization
summary = {name}.step("Summarize the following text: `document`", document=document)
print(summary)

# Text downloading -> Text summarization -> Text to speech
audio = {name}.step("Read out loud the summary of http://hf.co")
```

Chat-based execution (chat), the agent also has a chat-based approach, using the chat() method:
```
# Clean the chat history
{name}.reset()

# Text to image
capybara_image = {name}.chat("Show me an image of a capybara")
capybara_image.save("./capybara_image.png")

# Image transformation
transformed_capybara_image = {name}.chat("Transform the image so that it snows")
|
147 |
-
|
148 |
-
# Image segmentation
|
149 |
-
segmented_transformed_capybara_image = {name}.chat("Show me a mask of the snowy capybaras")
|
150 |
-
segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png")
|
151 |
-
```
|
152 |
-
"""
|
153 |
-
super(HuggingFaceToolAgent, self).__init__(name, description)
|
154 |
-
self.remote = remote
|
155 |
-
|
156 |
-
def reset(self) -> None:
|
157 |
-
r"""Resets the chat history of the agent."""
|
158 |
-
self.agent.prepare_for_new_chat()
|
159 |
-
|
160 |
-
def step(
|
161 |
-
self,
|
162 |
-
*args: Any,
|
163 |
-
remote: Optional[bool] = None,
|
164 |
-
**kwargs: Any,
|
165 |
-
) -> Any:
|
166 |
-
r"""Runs the agent in single execution mode.
|
167 |
-
|
168 |
-
Args:
|
169 |
-
*args (Any): Positional arguments to pass to the agent.
|
170 |
-
remote (bool, optional): Flag indicating whether to run the agent
|
171 |
-
remotely. Overrides the default setting. (default: :obj:`None`)
|
172 |
-
**kwargs (Any): Keyword arguments to pass to the agent.
|
173 |
-
|
174 |
-
Returns:
|
175 |
-
str: The response from the agent.
|
176 |
-
"""
|
177 |
-
if remote is None:
|
178 |
-
remote = self.remote
|
179 |
-
agent_output = self.agent.run(*args, remote=remote, **kwargs)
|
180 |
-
if isinstance(agent_output, self.agent_image_type):
|
181 |
-
agent_output = agent_output.to_raw()
|
182 |
-
return agent_output
|
183 |
-
|
184 |
-
def chat(
|
185 |
-
self,
|
186 |
-
*args: Any,
|
187 |
-
remote: Optional[bool] = None,
|
188 |
-
**kwargs: Any,
|
189 |
-
) -> Any:
|
190 |
-
r"""Runs the agent in a chat conversation mode.
|
191 |
-
|
192 |
-
Args:
|
193 |
-
*args (Any): Positional arguments to pass to the agent.
|
194 |
-
remote (bool, optional): Flag indicating whether to run the agent
|
195 |
-
remotely. Overrides the default setting. (default: :obj:`None`)
|
196 |
-
**kwargs (Any): Keyword arguments to pass to the agent.
|
197 |
-
|
198 |
-
Returns:
|
199 |
-
str: The response from the agent.
|
200 |
-
"""
|
201 |
-
if remote is None:
|
202 |
-
remote = self.remote
|
203 |
-
agent_output = self.agent.chat(*args, remote=remote, **kwargs)
|
204 |
-
if isinstance(agent_output, self.agent_image_type):
|
205 |
-
agent_output = agent_output.to_raw()
|
206 |
-
return agent_output
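The wrapper above exposes a small surface: construct with a `name` plus whatever arguments `OpenAiAgent` accepts, then drive it through `step()`, `chat()`, and `reset()`. A minimal end-to-end sketch, based only on the signatures shown above; the `model` and `api_key` arguments follow the `OpenAiAgent` constructor in transformers 4.31 (an assumption), and the key value is a placeholder:
```python
from camel.agents.tool_agents.hugging_face_tool_agent import (
    HuggingFaceToolAgent,
)

# Everything after `name` is forwarded verbatim to
# transformers.tools.OpenAiAgent; `model`/`api_key` follow its 4.31
# signature (assumption), and the key below is a placeholder.
agent = HuggingFaceToolAgent(
    "hf_agent",
    model="text-davinci-003",
    api_key="sk-...",
    remote=True,  # run tools on remote HF endpoints by default
)

# Single-execution mode: each step() call stands alone. Image outputs
# are unwrapped via to_raw(), so PIL's save() works directly.
picture = agent.step("Draw me a picture of rivers and lakes.")
picture.save("./picture.png")

# Chat mode keeps history across calls until reset() clears it; the
# per-call remote= flag overrides the constructor default.
agent.reset()
caption = agent.chat("Caption the `picture`.", picture=picture, remote=False)
print(caption)
```
Note the design choice visible in `step()` and `chat()`: the `remote` override is resolved per call, so a single agent instance can mix local and remote tool execution without being reconstructed.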
owl/camel/benchmarks/__init__.py
DELETED
@@ -1,17 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-
-from .base import BaseBenchmark
-
-__all__ = ["BaseBenchmark"]
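The only job of this `__init__.py` was to lift `BaseBenchmark` to the package root. A minimal sketch of the import path it provided; the printed module path is inferred from the `.base` re-export above:
```python
# BaseBenchmark is defined in camel/benchmarks/base.py, but the
# re-export made it importable from the package root.
from camel.benchmarks import BaseBenchmark

print(BaseBenchmark.__module__)  # expected: "camel.benchmarks.base"
```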