fix conflict

Files changed:

- .container/Dockerfile +3 -2
- .container/build_docker.bat +77 -38
- .container/check_docker.bat +53 -27
- .container/docker-compose.yml +1 -1
- .container/run_in_docker.bat +108 -46
- README.md +208 -33
- README_zh.md +190 -15
- community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md +175 -0
- owl/.env_template +4 -3
- owl/app.py +295 -73
- owl/app_en.py +918 -0
- owl/run.py +2 -0
- owl/run_deepseek_zh.py +4 -30
- owl/run_gaia_roleplaying.py +6 -2
- owl/run_mini.py +2 -0
- owl/run_ollama.py +133 -0
- owl/run_openai_compatiable_model.py +2 -0
- owl/run_qwen_mini_zh.py +5 -4
- owl/run_qwen_zh.py +5 -3
- owl/run_terminal.py +120 -0
- owl/run_terminal_zh.py +119 -0
- owl/script_adapter.py +5 -1
- owl/utils/enhanced_role_playing.py +1 -6
- owl/utils/gaia.py +1 -1
- pyproject.toml +1 -1
- requirements.txt +1 -1
- run_app.py +13 -10
- run_app_zh.py +60 -0
- uv.lock +132 -7
.container/Dockerfile
CHANGED

@@ -76,8 +76,6 @@ COPY assets/ ./assets/
 COPY README.md .
 COPY README_zh.md .
 
-# 设置环境变量文件 | Set environment variables file
-COPY owl/.env_template ./owl/.env
 
 # 创建启动脚本 | Create startup script
 RUN echo '#!/bin/bash\nxvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"' > /usr/local/bin/xvfb-python && \
@@ -93,7 +91,10 @@ WORKDIR /app/owl
 # 设置适当的权限 | Set appropriate permissions
 RUN chown -R owl:owl /app
 RUN mkdir -p /root/.cache && chown -R owl:owl /root/.cache
+RUN chmod 644 /app/owl/.env
 
+
+USER owl
 # 切换到非root用户 | Switch to non-root user
 # 注意:如果需要访问/dev/shm,可能仍需要root用户 | Note: If you need to access /dev/shm, you may still need root user
 # USER owl
.container/build_docker.bat
CHANGED

@@ -1,14 +1,18 @@
 @echo off
+chcp 65001 >nul
 setlocal enabledelayedexpansion
 
 echo 在Windows上构建Docker镜像...
+echo Building Docker image on Windows...
 
 REM 设置配置变量
+REM Set configuration variables
 set CACHE_DIR=.docker-cache\pip
 set BUILD_ARGS=--build-arg BUILDKIT_INLINE_CACHE=1
 set COMPOSE_FILE=docker-compose.yml
 
 REM 解析命令行参数
+REM Parse command line arguments
 set CLEAN_CACHE=0
 set REBUILD=0
 set SERVICE=
@@ -32,80 +36,106 @@ if /i "%~1"=="--service" (
     goto :parse_args
 )
 if /i "%~1"=="--help" (
+    echo 用法: build_docker.bat [选项]
+    echo Usage: build_docker.bat [options]
+    echo 选项:
+    echo Options:
+    echo --clean    清理缓存目录
+    echo --clean    Clean cache directory
+    echo --rebuild  强制重新构建镜像
+    echo --rebuild  Force rebuild image
+    echo --service  指定要构建的服务名称
+    echo --service  Specify service name to build
+    echo --help     显示此帮助信息
+    echo --help     Show this help message
     exit /b 0
 )
 shift
 goto :parse_args
 :end_parse_args
 
 REM 检查Docker是否安装
+REM Check if Docker is installed
 where docker >nul 2>nul
 if %ERRORLEVEL% NEQ 0 (
+    echo 错误: Docker未安装
+    echo Error: Docker not installed
+    echo 请先安装Docker Desktop
+    echo Please install Docker Desktop first: https://docs.docker.com/desktop/install/windows-install/
     pause
     exit /b 1
 )
 
 REM 检查Docker是否运行
+REM Check if Docker is running
 docker info >nul 2>nul
 if %ERRORLEVEL% NEQ 0 (
+    echo 错误: Docker未运行
+    echo Error: Docker not running
+    echo 请启动Docker Desktop应用程序
+    echo Please start Docker Desktop application
     pause
     exit /b 1
 )
 
 REM 检查docker-compose.yml文件是否存在
+REM Check if docker-compose.yml file exists
 if not exist "%COMPOSE_FILE%" (
+    echo 错误: 未找到%COMPOSE_FILE%文件
+    echo Error: %COMPOSE_FILE% file not found
+    echo 请确保在正确的目录中运行此脚本
+    echo Please make sure you are running this script in the correct directory
     pause
     exit /b 1
 )
 
 REM 检查Docker Compose命令
+REM Check Docker Compose command
 where docker-compose >nul 2>nul
 if %ERRORLEVEL% EQU 0 (
     set COMPOSE_CMD=docker-compose
 ) else (
     echo 尝试使用新的docker compose命令...
+    echo Trying to use new docker compose command...
     docker compose version >nul 2>nul
     if %ERRORLEVEL% EQU 0 (
         set COMPOSE_CMD=docker compose
     ) else (
+        echo 错误: 未找到Docker Compose命令
+        echo Error: Docker Compose command not found
+        echo 请确保Docker Desktop已正确安装
+        echo Please make sure Docker Desktop is properly installed
         pause
         exit /b 1
     )
 )
 
 REM 设置Docker BuildKit环境变量
+REM Set Docker BuildKit environment variables
 set DOCKER_BUILDKIT=1
 set COMPOSE_DOCKER_CLI_BUILD=1
 
 echo 启用Docker BuildKit加速构建...
+echo Enabling Docker BuildKit to accelerate build...
 
 REM 清理缓存(如果指定)
+REM Clean cache (if specified)
 if %CLEAN_CACHE% EQU 1 (
     echo 清理缓存目录...
+    echo Cleaning cache directory...
     if exist "%CACHE_DIR%" rmdir /s /q "%CACHE_DIR%"
 )
 
 REM 创建缓存目录
+REM Create cache directory
 if not exist "%CACHE_DIR%" (
     echo 创建缓存目录...
+    echo Creating cache directory...
     mkdir "%CACHE_DIR%"
 )
 
 REM 添加构建时间标记
+REM Add build time tag
 for /f "tokens=2 delims==" %%a in ('wmic OS Get localdatetime /value') do set "dt=%%a"
 set "YEAR=%dt:~0,4%"
 set "MONTH=%dt:~4,2%"
@@ -115,33 +145,42 @@ set "MINUTE=%dt:~10,2%"
 set "BUILD_TIME=%YEAR%%MONTH%%DAY%_%HOUR%%MINUTE%"
 set "BUILD_ARGS=%BUILD_ARGS% --build-arg BUILD_TIME=%BUILD_TIME%"
 
 REM 构建Docker镜像
+REM Build Docker image
+echo 开始构建Docker镜像...
+echo Starting to build Docker image...
 
 if "%SERVICE%"=="" (
     if %REBUILD% EQU 1 (
         echo 强制重新构建所有服务...
+        echo Force rebuilding all services...
         %COMPOSE_CMD% build --no-cache %BUILD_ARGS%
     ) else (
         %COMPOSE_CMD% build %BUILD_ARGS%
     )
 ) else (
     if %REBUILD% EQU 1 (
         echo 强制重新构建服务 %SERVICE%...
+        echo Force rebuilding service %SERVICE%...
         %COMPOSE_CMD% build --no-cache %BUILD_ARGS% %SERVICE%
     ) else (
         echo 构建服务 %SERVICE%...
+        echo Building service %SERVICE%...
         %COMPOSE_CMD% build %BUILD_ARGS% %SERVICE%
     )
 )
 
 if %ERRORLEVEL% EQU 0 (
     echo Docker镜像构建成功!
+    echo Docker image build successful!
+    echo 构建时间: %BUILD_TIME%
+    echo Build time: %BUILD_TIME%
+    echo 可以使用以下命令启动容器:
+    echo You can use the following command to start the container:
     echo %COMPOSE_CMD% up -d
 ) else (
     echo Docker镜像构建失败,请检查错误信息。
+    echo Docker image build failed, please check error messages.
 )
 
 pause
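Stripped of its argument parsing and prompts, the script's core build path is just BuildKit plus Docker Compose. A rough bash equivalent of what it runs, for reference only (the `owl` service name is an assumption taken from the other scripts in this commit):

```bash
# Run from .container/ — mirrors build_docker.bat's default and --rebuild/--service paths.
export DOCKER_BUILDKIT=1
export COMPOSE_DOCKER_CLI_BUILD=1
docker compose build --build-arg BUILDKIT_INLINE_CACHE=1
# Forced rebuild of a single service (assumed name "owl"):
# docker compose build --no-cache --build-arg BUILDKIT_INLINE_CACHE=1 owl
```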
.container/check_docker.bat
CHANGED

@@ -1,62 +1,88 @@
 @echo off
+chcp 65001 >nul
+echo 检查Docker环境...
+echo Checking Docker environment...
 
 REM 检查Docker是否安装
+REM Check if Docker is installed
 where docker >nul 2>nul
 if %ERRORLEVEL% NEQ 0 (
+    echo 错误: Docker未安装
+    echo Error: Docker not installed
+    echo 在Windows上安装Docker的方法:
+    echo How to install Docker on Windows:
+    echo 1. 访问 https://docs.docker.com/desktop/install/windows-install/ 下载Docker Desktop
+    echo 1. Visit https://docs.docker.com/desktop/install/windows-install/ to download Docker Desktop
+    echo 2. 安装并启动Docker Desktop
+    echo 2. Install and start Docker Desktop
     pause
     exit /b 1
 )
 
 echo Docker已安装
+echo Docker is installed
 
 REM 检查Docker Compose是否安装
+REM Check if Docker Compose is installed
 where docker-compose >nul 2>nul
 if %ERRORLEVEL% NEQ 0 (
+    echo 警告: Docker-Compose未找到,尝试使用新的docker compose命令
+    echo Warning: Docker-Compose not found, trying to use new docker compose command
     docker compose version >nul 2>nul
     if %ERRORLEVEL% NEQ 0 (
+        echo 错误: Docker Compose未安装
+        echo Error: Docker Compose not installed
+        echo Docker Desktop for Windows应该已包含Docker Compose
+        echo Docker Desktop for Windows should already include Docker Compose
+        echo 请确保Docker Desktop已正确安装
+        echo Please make sure Docker Desktop is properly installed
         pause
         exit /b 1
     ) else (
         echo 使用新的docker compose命令
+        echo Using new docker compose command
         set COMPOSE_CMD=docker compose
     )
 ) else (
     echo Docker-Compose已安装
+    echo Docker-Compose is installed
     set COMPOSE_CMD=docker-compose
 )
 
 REM 检查Docker是否正在运行
+REM Check if Docker is running
 docker info >nul 2>nul
 if %ERRORLEVEL% NEQ 0 (
+    echo 错误: Docker未运行
+    echo Error: Docker not running
+    echo 请启动Docker Desktop应用程序
+    echo Please start Docker Desktop application
     pause
     exit /b 1
 )
 
 echo Docker正在运行
+echo Docker is running
 
 REM 检查是否有.env文件
+REM Check if .env file exists
+if not exist "..\owl\.env" (
+    echo 警告: 未找到owl\.env文件
+    echo Warning: owl\.env file not found
+    echo 请运行以下命令创建环境变量文件
+    echo Please run the following command to create environment variable file:
+    echo copy ..\owl\.env_template ..\owl\.env
+    echo 然后编辑owl\.env文件,填写必要的API密钥
+    echo Then edit owl\.env file and fill in necessary API keys
 ) else (
     echo 环境变量文件已存在
+    echo Environment variable file exists
 )
 
+echo 所有检查完成,您的系统已准备好构建和运行OWL项目的Docker容器
+echo All checks completed, your system is ready to build and run OWL project Docker container
+echo 请运行以下命令构建Docker镜像:
+echo Please run the following command to build Docker image:
 echo %COMPOSE_CMD% build
 
 pause
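The same four checks the batch script performs (Docker installed, Compose available, daemon running, `owl/.env` present) can be approximated in bash for macOS/Linux users. This sketch is not part of the commit, only an illustrative equivalent run from `.container/`:

```bash
# Approximate bash version of check_docker.bat's checks.
command -v docker >/dev/null || { echo "Docker not installed"; exit 1; }
docker info >/dev/null 2>&1  || { echo "Docker not running"; exit 1; }
docker compose version >/dev/null 2>&1 || command -v docker-compose >/dev/null \
  || { echo "Docker Compose not found"; exit 1; }
[ -f ../owl/.env ] || echo "Warning: ../owl/.env missing; copy ../owl/.env_template to ../owl/.env"
```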
.container/docker-compose.yml
CHANGED

@@ -11,7 +11,7 @@ services:
       - python:3.10-slim
     volumes:
       # 挂载.env文件,方便配置API密钥 | Mount .env file for easy API key configuration
+      - ../owl/.env:/app/owl/.env
       # 可选:挂载数据目录 | Optional: Mount data directory
       - ./data:/app/data
       # 挂载缓存目录,避免重复下载 | Mount cache directories to avoid repeated downloads
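The only change here is the bind mount of the host `owl/.env` into `/app/owl/.env`. Once that file exists on the host, the mount can be sanity-checked with standard Compose commands; a small sketch, assuming the service is named `owl` as the other scripts in this commit do:

```bash
# From .container/: start the stack and confirm the .env file is visible in the container.
docker compose up -d
docker compose exec owl ls -l /app/owl/.env
```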
.container/run_in_docker.bat
CHANGED

@@ -1,116 +1,178 @@
 @echo off
+chcp 65001 >nul
 setlocal enabledelayedexpansion
 
 REM 定义配置变量
+REM Define configuration variables
 set SERVICE_NAME=owl
 set PYTHON_CMD=xvfb-python
 set MAX_WAIT_SECONDS=60
 set CHECK_INTERVAL_SECONDS=2
 
 REM 检查参数
+REM Check parameters
 if "%~1"=="" (
+    echo 用法: run_in_docker.bat [脚本名称] "你的问题"
+    echo Usage: run_in_docker.bat [script name] "your question"
+    echo 例如: run_in_docker.bat run.py "什么是人工智能?"
+    echo Example: run_in_docker.bat run.py "What is artificial intelligence?"
+    echo 或者: run_in_docker.bat run_deepseek_example.py "什么是人工智能?"
+    echo Or: run_in_docker.bat run_deepseek_example.py "What is artificial intelligence?"
+    echo 如果不指定脚本名称,默认使用 run.py
+    echo If script name is not specified, run.py will be used by default
     exit /b 1
 )
 
 REM 判断第一个参数是否是脚本名称
+REM Determine if the first parameter is a script name
 set SCRIPT_NAME=%~1
 set QUERY=%~2
 
 if "!SCRIPT_NAME:~-3!"==".py" (
     REM 如果提供了第二个参数,则为查询内容
+    REM If a second parameter is provided, it's the query content
     if "!QUERY!"=="" (
+        echo 请提供查询参数,例如: run_in_docker.bat !SCRIPT_NAME! "你的问题"
+        echo Please provide query parameter, e.g.: run_in_docker.bat !SCRIPT_NAME! "your question"
         exit /b 1
     )
 ) else (
     REM 如果第一个参数不是脚本名称,则默认使用 run.py
+    REM If the first parameter is not a script name, use run.py by default
     set QUERY=!SCRIPT_NAME!
     set SCRIPT_NAME=run.py
 )
 
 REM 检查脚本是否存在
+REM Check if the script exists
+if not exist "..\owl\!SCRIPT_NAME!" (
+    echo 错误: 脚本 '..\owl\!SCRIPT_NAME!' 不存在
+    echo Error: Script '..\owl\!SCRIPT_NAME!' does not exist
+    echo 可用的脚本有:
+    echo Available scripts:
+    dir /b ..\owl\*.py | findstr /v "__"
     exit /b 1
 )
 
+echo 使用脚本: !SCRIPT_NAME!
+echo Using script: !SCRIPT_NAME!
+echo 查询内容: !QUERY!
+echo Query content: !QUERY!
 
+REM 优先检查新版 docker compose 命令
+REM Check new docker compose command first
+docker compose version >nul 2>nul
+if %ERRORLEVEL% EQU 0 (
+    echo 使用新版 docker compose 命令
+    echo Using new docker compose command
+    set COMPOSE_CMD=docker compose
+) else (
+    REM 如果新版不可用,检查旧版 docker-compose
+    REM If new version is not available, check old docker-compose
+    where docker-compose >nul 2>nul
+    if %ERRORLEVEL% EQU 0 (
+        echo 使用旧版 docker-compose 命令
+        echo Using old docker-compose command
+        set COMPOSE_CMD=docker-compose
+    ) else (
+        echo 错误: Docker Compose 未安装
+        echo Error: Docker Compose not installed
+        echo 请确保 Docker Desktop 已正确安装
+        echo Please make sure Docker Desktop is properly installed
+        pause
+        exit /b 1
+    )
+)
+
+REM 从docker-compose.yml获取服务名称(如果文件存在)
+REM Get service name from docker-compose.yml (if file exists)
+if exist "docker-compose.yml" (
+    for /f "tokens=*" %%a in ('findstr /r "^ [a-zA-Z0-9_-]*:" docker-compose.yml') do (
         set line=%%a
         set service=!line:~2,-1!
         if not "!service!"=="" (
             REM 使用第一个找到的服务名称
+            REM Use the first service name found
             set SERVICE_NAME=!service!
+            echo 从docker-compose.yml检测到服务名称: !SERVICE_NAME!
+            echo Detected service name from docker-compose.yml: !SERVICE_NAME!
             goto :found_service
         )
     )
 )
 :found_service
 
 REM 确保Docker容器正在运行
+REM Ensure Docker container is running
+%COMPOSE_CMD% ps | findstr "!SERVICE_NAME!.*Up" > nul
 if errorlevel 1 (
     echo 启动Docker容器...
+    echo Starting Docker container...
+    %COMPOSE_CMD% up -d
 
     REM 使用循环检查容器是否就绪
+    REM Use loop to check if container is ready
+    echo 等待容器启动...
+    echo Waiting for container to start...
     set /a total_wait=0
 
 :wait_loop
     timeout /t !CHECK_INTERVAL_SECONDS! /nobreak > nul
     set /a total_wait+=!CHECK_INTERVAL_SECONDS!
 
+    %COMPOSE_CMD% ps | findstr "!SERVICE_NAME!.*Up" > nul
     if errorlevel 1 (
         if !total_wait! LSS !MAX_WAIT_SECONDS! (
             echo 容器尚未就绪,已等待!total_wait!秒,继续等待...
+            echo Container not ready yet, waited for !total_wait! seconds, continuing to wait...
             goto :wait_loop
         ) else (
+            echo 错误:容器启动超时,已等待!MAX_WAIT_SECONDS!秒
+            echo Error: Container startup timeout, waited for !MAX_WAIT_SECONDS! seconds
+            echo 请检查Docker容器状态:%COMPOSE_CMD% ps
+            echo Please check Docker container status: %COMPOSE_CMD% ps
             exit /b 1
         )
     ) else (
         echo 容器已就绪,共等待了!total_wait!秒
+        echo Container is ready, waited for !total_wait! seconds in total
     )
 )
 
 REM 检查容器中是否存在xvfb-python命令
+REM Check if xvfb-python command exists in container
+echo 检查容器中的命令...
+echo Checking commands in container...
+%COMPOSE_CMD% exec -T !SERVICE_NAME! which !PYTHON_CMD! > nul 2>&1
 if errorlevel 1 (
     echo 警告:容器中未找到!PYTHON_CMD!命令,尝试使用python替代
+    echo Warning: !PYTHON_CMD! command not found in container, trying to use python instead
     set PYTHON_CMD=python
 
     REM 检查python命令是否存在
+    REM Check if python command exists
+    %COMPOSE_CMD% exec -T !SERVICE_NAME! which python > nul 2>&1
     if errorlevel 1 (
+        echo 错误:容器中未找到python命令
+        echo Error: python command not found in container
+        echo 请检查容器配置
+        echo Please check container configuration
         exit /b 1
     )
 )
 
 REM 在容器中运行指定的脚本,传递查询参数
+REM Run the specified script in container, passing query parameter
+echo 在Docker容器中使用!PYTHON_CMD!运行脚本...
+echo Running script in Docker container using !PYTHON_CMD!...
+%COMPOSE_CMD% exec -T !SERVICE_NAME! !PYTHON_CMD! !SCRIPT_NAME! "!QUERY!"
 
 if errorlevel 0 (
     echo 查询完成!
+    echo Query completed!
 ) else (
     echo 查询执行失败,请检查错误信息。
+    echo Query execution failed, please check error messages.
 )
 
 pause
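Once the container is up, the wrapper ultimately reduces to a single `docker compose exec` call, which works from any shell. A sketch of that direct invocation, assuming the default service name `owl` from the script and the `xvfb-python` wrapper installed by the Dockerfile:

```bash
# From .container/: start the stack, then run a script inside the container with a query.
docker compose up -d
docker compose exec -T owl xvfb-python run.py "What is artificial intelligence?"
```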
README.md
CHANGED

@@ -71,25 +71,47 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tasks
 - [**Install Dependencies**](#install-dependencies)
 - [**Setup Environment Variables**](#setup-environment-variables)
 - [**Running with Docker**](#running-with-docker)
 - [🚀 Quick Start](#-quick-start)
+- [🧰 Toolkits and Capabilities](#-toolkits-and-capabilities)
 - [🌐 Web Interface](#-web-interface)
 - [🧪 Experiments](#-experiments)
 - [⏱️ Future Plans](#️-future-plans)
 - [📄 License](#-license)
 - [🖊️ Cite](#️-cite)
+- [🤝 Contributing](#-contributing)
 - [🔥 Community](#-community)
 - [❓ FAQ](#-faq)
+- [📚 Exploring CAMEL Dependency](#-exploring-camel-dependency)
 - [⭐ Star History](#-star-history)
 
 
 # 🔥 News
 
+<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
+  <h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
+    🌟🌟🌟 <b>COMMUNITY CALL FOR USE CASES!</b> 🌟🌟🌟
+  </h3>
+  <p style="font-size: 1.1em; margin: 10px 0;">
+    We're inviting the community to contribute innovative use cases for OWL! <br>
+    The <b>top ten submissions</b> will receive special community gifts and recognition.
+  </p>
+  <p>
+    <a href="https://github.com/camel-ai/owl/tree/main/community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md" style="background-color: #d81b60; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">Learn More & Submit</a>
+  </p>
+  <p style="margin: 5px 0;">
+    Submission deadline: <b>March 31, 2025</b>
+  </p>
+</div>
+
+- **[2025.03.12]**: Added Bocha search in SearchToolkit, integrated Volcano Engine model platform, and enhanced Azure and OpenAI Compatible models with structured output and tool calling.
+- **[2025.03.11]**: We added MCPToolkit, FileWriteToolkit, and TerminalToolkit to enhance OWL agents with MCP tool calling, file writing capabilities, and terminal command execution.
+- **[2025.03.09]**: We added a web-based user interface that makes it easier to interact with the system.
+- **[2025.03.07]**: We open-sourced the codebase of the 🦉 OWL project.
+- **[2025.03.03]**: OWL achieved the #1 position among open-source frameworks on the GAIA benchmark with a score of 58.18.
 
 # 🎬 Demo Video
 
+https://github.com/user-attachments/assets/2a2a825d-39ea-45c5-9ba1-f9d58efbc372
 
 https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
 
@@ -104,6 +126,8 @@ https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
 
 # 🛠️ Installation
 
+OWL supports multiple installation methods to fit your workflow preferences. Choose the option that works best for you.
+
 ## Option 1: Using uv (Recommended)
 
 ```bash
@@ -181,19 +205,45 @@ pip install -r requirements.txt
 conda deactivate
 ```
 
 ## **Setup Environment Variables**
 
+OWL requires various API keys to interact with different services. The `owl/.env_template` file contains placeholders for all necessary API keys along with links to the services where you can register for them.
+
+### Option 1: Using a `.env` File (Recommended)
+
+1. **Copy and Rename the Template**:
+   ```bash
+   cd owl
+   cp .env_template .env
+   ```
+
+2. **Configure Your API Keys**:
+   Open the `.env` file in your preferred text editor and insert your API keys in the corresponding fields.
+
+   > **Note**: For the minimal example (`run_mini.py`), you only need to configure the LLM API key (e.g., `OPENAI_API_KEY`).
+
+### Option 2: Setting Environment Variables Directly
+
+Alternatively, you can set environment variables directly in your terminal:
+
+- **macOS/Linux (Bash/Zsh)**:
+  ```bash
+  export OPENAI_API_KEY="your-openai-api-key-here"
+  ```
+
+- **Windows (Command Prompt)**:
+  ```batch
+  set OPENAI_API_KEY="your-openai-api-key-here"
+  ```
+
+- **Windows (PowerShell)**:
+  ```powershell
+  $env:OPENAI_API_KEY = "your-openai-api-key-here"
+  ```
+
+> **Note**: Environment variables set directly in the terminal will only persist for the current session.
-> **Note**: For optimal performance, we strongly recommend using OpenAI models. Our experiments show that other models may result in significantly lower performance on complex tasks and benchmarks.
 
 ## **Running with Docker**
 
@@ -225,9 +275,7 @@ For more detailed Docker usage instructions, including cross-platform support, optimized configurations:
 
 # 🚀 Quick Start
 
-Run the following demo case:
+After installation and setting up your environment variables, you can start using OWL right away:
 
 ```bash
 python owl/run.py
@@ -235,17 +283,32 @@ python owl/run.py
 
 ## Running with Different Models
 
+### Model Requirements
+
+- **Tool Calling**: OWL requires models with robust tool calling capabilities to interact with various toolkits. Models must be able to understand tool descriptions, generate appropriate tool calls, and process tool outputs.
+
+- **Multimodal Understanding**: For tasks involving web interaction, image analysis, or video processing, models with multimodal capabilities are required to interpret visual content and context.
+
+#### Supported Models
+
+For information on configuring AI models, please refer to our [CAMEL models documentation](https://docs.camel-ai.org/key_modules/models.html#supported-model-platforms-in-camel).
+
+> **Note**: For optimal performance, we strongly recommend using OpenAI models (GPT-4 or later versions). Our experiments show that other models may result in significantly lower performance on complex tasks and benchmarks, especially those requiring advanced multi-modal understanding and tool use.
+
+OWL supports various LLM backends, though capabilities may vary depending on the model's tool calling and multimodal abilities. You can use the following scripts to run with different models:
 
 ```bash
 # Run with Qwen model
+python owl/run_qwen_zh.py
 
 # Run with Deepseek model
+python owl/run_deepseek_zh.py
 
 # Run with other OpenAI-compatible models
 python owl/run_openai_compatiable_model.py
+
+# Run with Ollama
+python owl/run_ollama.py
 ```
 
 For a simpler version that only requires an LLM API key, you can try our minimal example:
@@ -280,21 +343,92 @@ print(f"\033[94mAnswer: {answer}\033[0m")
 OWL will then automatically invoke document-related tools to process the file and extract the answer.
 
 
+### Example Tasks
+
+Here are some tasks you can try with OWL:
+
 - "Find the latest stock price for Apple Inc."
 - "Analyze the sentiment of recent tweets about climate change"
 - "Help me debug this Python code: [your code here]"
 - "Summarize the main points from this research paper: [paper URL]"
+- "Create a data visualization for this dataset: [dataset path]"
+
+# 🧰 Toolkits and Capabilities
+
+> **Important**: Effective use of toolkits requires models with strong tool calling capabilities. For multimodal toolkits (Web, Image, Video), models must also have multimodal understanding abilities.
+
+OWL supports various toolkits that can be customized by modifying the `tools` list in your script:
+
+```python
+# Configure toolkits
+tools = [
+    *WebToolkit(headless=False).get_tools(),  # Browser automation
+    *VideoAnalysisToolkit(model=models["video"]).get_tools(),
+    *AudioAnalysisToolkit().get_tools(),  # Requires OpenAI Key
+    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
+    *ImageAnalysisToolkit(model=models["image"]).get_tools(),
+    SearchToolkit().search_duckduckgo,
+    SearchToolkit().search_google,  # Comment out if unavailable
+    SearchToolkit().search_wiki,
+    *ExcelToolkit().get_tools(),
+    *DocumentProcessingToolkit(model=models["document"]).get_tools(),
+    *FileWriteToolkit(output_dir="./").get_tools(),
+]
+```
+
+## Available Toolkits
+
+Key toolkits include:
+
+### Multimodal Toolkits (Require multimodal model capabilities)
+- **WebToolkit**: Browser automation for web interaction and navigation
+- **VideoAnalysisToolkit**: Video processing and content analysis
+- **ImageAnalysisToolkit**: Image analysis and interpretation
+
+### Text-Based Toolkits
+- **AudioAnalysisToolkit**: Audio processing (requires OpenAI API)
+- **CodeExecutionToolkit**: Python code execution and evaluation
+- **SearchToolkit**: Web searches (Google, DuckDuckGo, Wikipedia)
+- **DocumentProcessingToolkit**: Document parsing (PDF, DOCX, etc.)
+
+Additional specialized toolkits: ArxivToolkit, GitHubToolkit, GoogleMapsToolkit, MathToolkit, NetworkXToolkit, NotionToolkit, RedditToolkit, WeatherToolkit, and more. For a complete list, see the [CAMEL toolkits documentation](https://docs.camel-ai.org/key_modules/tools.html#built-in-toolkits).
+
+## Customizing Your Configuration
+
+To customize available tools:
+
+```python
+# 1. Import toolkits
+from camel.toolkits import WebToolkit, SearchToolkit, CodeExecutionToolkit
+
+# 2. Configure tools list
+tools = [
+    *WebToolkit(headless=True).get_tools(),
+    SearchToolkit().search_wiki,
+    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
+]
+
+# 3. Pass to assistant agent
+assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
+```
+
+Selecting only necessary toolkits optimizes performance and reduces resource usage.
 
 # 🌐 Web Interface
 
+OWL includes an intuitive web-based user interface that makes it easier to interact with the system.
+
+## Starting the Web UI
 
 ```bash
+# Start the Chinese version
+python run_app_zh.py
+
+# Start the English version
 python run_app.py
 ```
 
+## Features
 
 - **Easy Model Selection**: Choose between different models (OpenAI, Qwen, DeepSeek, etc.)
 - **Environment Variable Management**: Configure your API keys and other settings directly from the UI
@@ -308,21 +442,25 @@ The web interface is built using Gradio and runs locally on your machine. No data is sent to external servers.
 To reproduce OWL's GAIA benchmark score of 58.18:
 
 1. Switch to the `gaia58.18` branch:
+   ```bash
+   git checkout gaia58.18
+   ```
 
+2. Run the evaluation script:
+   ```bash
+   python run_gaia_roleplaying.py
+   ```
+
+This will execute the same configuration that achieved our top-ranking performance on the GAIA benchmark.
 
 # ⏱️ Future Plans
 
+We're continuously working to improve OWL. Here's what's on our roadmap:
 
+- [ ] Write a technical blog post detailing our exploration and insights in multi-agent collaboration in real-world tasks
+- [ ] Enhance the toolkit ecosystem with more specialized tools for domain-specific tasks
+- [ ] Develop more sophisticated agent interaction patterns and communication protocols
+- [ ] Improve performance on complex multi-step reasoning tasks
 
 # 📄 License
 
@@ -343,10 +481,27 @@ If you find this repo useful, please cite:
 }
 ```
 
+# 🤝 Contributing
+
+We welcome contributions from the community! Here's how you can help:
+
+1. Read our [Contribution Guidelines](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md)
+2. Check [open issues](https://github.com/camel-ai/camel/issues) or create new ones
+3. Submit pull requests with your improvements
+
+**Current Issues Open for Contribution:**
+- [#1770](https://github.com/camel-ai/camel/issues/1770)
+- [#1712](https://github.com/camel-ai/camel/issues/1712)
+- [#1537](https://github.com/camel-ai/camel/issues/1537)
+- [#1827](https://github.com/camel-ai/camel/issues/1827)
+
+To take on an issue, simply leave a comment stating your interest.
+
 # 🔥 Community
+Join us ([*Discord*](https://discord.camel-ai.org/) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of finding the scaling laws of agents.
+
 Join us for further discussions!
+
 <!--  -->
 
 # ❓ FAQ
@@ -355,6 +510,26 @@ Join us for further discussions!
 
 A: If OWL determines that a task can be completed using non-browser tools (such as search or code execution), the browser will not be launched. The browser window will only appear when OWL determines that browser-based interaction is necessary.
 
+**Q: Which Python version should I use?**
+
+A: OWL supports Python 3.10, 3.11, and 3.12.
+
+**Q: How can I contribute to the project?**
+
+A: See our [Contributing](#-contributing) section for details on how to get involved. We welcome contributions of all kinds, from code improvements to documentation updates.
+
+# 📚 Exploring CAMEL Dependency
+
+OWL is built on top of the [CAMEL](https://github.com/camel-ai/camel) Framework, here's how you can explore the CAMEL source code and understand how it works with OWL:
+
+## Accessing CAMEL Source Code
+
+```bash
+# Clone the CAMEL repository
+git clone https://github.com/camel-ai/camel.git
+cd camel
+```
+
 # ⭐ Star History
 
 [](https://star-history.com/#camel-ai/owl&Date)
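Tying the new "Setup Environment Variables" and "Quick Start" sections together, a minimal local run based purely on the README content above might look like this (the key value is a placeholder; the script paths come from the file list in this commit):

```bash
# Prepare the environment file, then run the demos described in the README.
cd owl && cp .env_template .env && cd ..   # fill in OPENAI_API_KEY etc. in owl/.env
python owl/run.py                          # default demo case
python owl/run_mini.py                     # minimal example, needs only an LLM API key
python run_app.py                          # English web interface
```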
README_zh.md
CHANGED

@@ -1,6 +1,6 @@
 <h1 align="center">
 	🦉 OWL: Optimized Workforce Learning for General Multi-Agent Assistance in Real-World Task Automation
+	🦉 OWL: 优化劳动力学习的通用智能体,用于处理现实世界的自动化任务
 </h1>
 
 
@@ -73,18 +73,42 @@
 - [**设置环境变量**](#设置环境变量)
 - [**使用Docker运行**](#使用docker运行)
 - [🚀 快速开始](#-快速开始)
+- [🧰 工具包与功能](#-工具包与功能)
 - [🌐 网页界面](#-网页界面)
 - [🧪 实验](#-实验)
 - [⏱️ 未来计划](#️-未来计划)
 - [📄 许可证](#-许可证)
 - [🖊️ 引用](#️-引用)
+- [🤝 贡献](#-贡献)
 - [🔥 社区](#-社区)
 - [❓ 常见问题](#-常见问题)
+- [📚 探索 CAMEL 依赖](#-探索-camel-依赖)
+- [⭐ Star History](#-star-history)
 
 
 # 🔥 新闻
 
+<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
+  <h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
+    🌟🌟🌟 <b>OWL社区用例征集令!</b> 🌟🌟🌟
+  </h3>
+  <p style="font-size: 1.1em; margin: 10px 0;">
+    我们请社区成员贡献创新的OWL用例!<br>
+    <b>前十名提交</b>将获得特别社区礼物和认可。
+  </p>
+  <p>
+    <a href="https://github.com/camel-ai/owl/tree/main/community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md" style="background-color: #d81b60; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">了解更多并提交</a>
+  </p>
+  <p style="margin: 5px 0;">
+    提交截止日期:<b>2025年3月31日</b>
+  </p>
+</div>
+
+- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能,集成了火山引擎模型平台,并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
+- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit,增强 OWL Agent的工具调用、文件写入能力和终端命令执行功能。
+- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。
 - **[2025.03.07]**: 我们开源了 🦉 OWL 项目的代码库。
+- **[2025.03.03]**: OWL 在 GAIA 基准测试中取得 58.18 平均分,在开源框架中排名第一!
 
 # 🎬 演示视频
 
@@ -180,15 +204,43 @@ pip install -r requirements.txt
 conda deactivate
 ```
 
 ## **设置环境变量**
 
+OWL 需要各种 API 密钥来与不同的服务进行交互。`owl/.env_template` 文件包含了所有必要 API 密钥的占位符,以及可以注册这些服务的链接。
+
+### 选项 1:使用 `.env` 文件(推荐)
+
+1. **复制并重命名模板**:
+   ```bash
+   cd owl
+   cp .env_template .env
+   ```
+
+2. **配置你的 API 密钥**:
+   在你喜欢的文本编辑器中打开 `.env` 文件,并在相应字段中插入你的 API 密钥。
+
+   > **注意**:对于最小示例(`run_mini.py`),你只需要配置 LLM API 密钥(例如,`OPENAI_API_KEY`)。
+
+### 选项 2:直接设置环境变量
+
+或者,你可以直接在终端中设置环境变量:
+
+- **macOS/Linux (Bash/Zsh)**:
+  ```bash
+  export OPENAI_API_KEY="你的-openai-api-密钥"
+  ```
 
+- **Windows (命令提示符)**:
+  ```batch
+  set OPENAI_API_KEY="你的-openai-api-密钥"
+  ```
 
+- **Windows (PowerShell)**:
+  ```powershell
+  $env:OPENAI_API_KEY = "你的-openai-api-密钥"
+  ```
 
+> **注意**:直接在终端中设置的环境变量仅在当前会话中有效。
 
 ## **使用Docker运行**
 
@@ -235,17 +287,32 @@ python owl/run_mini.py
 
 ## 使用不同的模型
 
+### 模型要求
+
+- **工具调用能力**:OWL 需要具有强大工具调用能力的模型来与各种工具包交互。模型必须能够理解工具描述、生成适当的工具调用,并处理工具输出。
+
+- **多模态理解能力**:对于涉及网页交互、图像分析或视频处理的任务,需要具备多模态能力的模型来解释视觉内容和上下文。
+
+#### 支持的模型
+
+有关配置模型的信息,请参阅我们的 [CAMEL 模型文档](https://docs.camel-ai.org/key_modules/models.html#supported-model-platforms-in-camel)。
+
+> **注意**:为获得最佳性能,我们强烈推荐使用 OpenAI 模型(GPT-4 或更高版本)。我们的实验表明,其他模型在复杂任务和基准测试上可能表现明显较差,尤其是那些需要多模态理解和工具使用的任务。
+
+OWL 支持多种 LLM 后端,但功能可能因模型的工具调用和多模态能力而异。您可以使用以下脚本来运行不同的模型:
 
 ```bash
 # 使用 Qwen 模型运行
+python owl/run_qwen_zh.py
 
 # 使用 Deepseek 模型运行
+python owl/run_deepseek_zh.py
 
 # 使用其他 OpenAI 兼容模型运行
 python owl/run_openai_compatiable_model.py
+
+# 使用 Ollama 运行
+python owl/run_ollama.py
 ```
 
 你可以通过修改 `run.py` 脚本来运行自己的任务:
@@ -280,11 +347,76 @@ OWL 将自动调用与文档相关的工具来处理文件并提取答案。
 - "帮我调试这段 Python 代码:[在此粘贴你的代码]"
 - "总结这篇研究论文的主要观点:[论文URL]"
 
+# 🧰 工具包与功能
+
+> **重要提示**:有效使用工具包需要具备强大工具调用能力的模型。对于多模态工具包(Web、图像、视频),模型还必须具备多模态理解能力。
+
+OWL支持多种工具包,可通过修改脚本中的`tools`列表进行自定义:
+
+```python
+# 配置工具包
+tools = [
+    *WebToolkit(headless=False).get_tools(),  # 浏览器自动化
+    *VideoAnalysisToolkit(model=models["video"]).get_tools(),
+    *AudioAnalysisToolkit().get_tools(),  # 需要OpenAI API密钥
+    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
+    *ImageAnalysisToolkit(model=models["image"]).get_tools(),
+    SearchToolkit().search_duckduckgo,
+    SearchToolkit().search_google,  # 如果不可用请注释
+    SearchToolkit().search_wiki,
+    *ExcelToolkit().get_tools(),
+    *DocumentProcessingToolkit(model=models["document"]).get_tools(),
+    *FileWriteToolkit(output_dir="./").get_tools(),
+]
+```
+
+## 主要工具包
+
+关键工具包包括:
+
+### 多模态工具包(需要模型具备多模态能力)
+- **WebToolkit**:浏览器自动化,用于网页交互和导航
+- **VideoAnalysisToolkit**:视频处理和内容分析
+- **ImageAnalysisToolkit**:图像分析和解释
+
+### 基于文本的工具包
+- **AudioAnalysisToolkit**:音频处理(需要 OpenAI API)
+- **CodeExecutionToolkit**:Python 代码执行和评估
+- **SearchToolkit**:网络搜索(Google、DuckDuckGo、维基百科)
+- **DocumentProcessingToolkit**:文档解析(PDF、DOCX等)
+
+其他专用工具包:ArxivToolkit、GitHubToolkit、GoogleMapsToolkit、MathToolkit、NetworkXToolkit、NotionToolkit、RedditToolkit、WeatherToolkit等。完整工具包列表请参阅[CAMEL工具包文档](https://docs.camel-ai.org/key_modules/tools.html#built-in-toolkits)。
+
+## 自定义配置
+
+自定义可用工具的方法:
+
+```python
+# 1. 导入工具包
+from camel.toolkits import WebToolkit, SearchToolkit, CodeExecutionToolkit
+
+# 2. 配置工具列表
+tools = [
+    *WebToolkit(headless=True).get_tools(),
+    SearchToolkit().search_wiki,
+    *CodeExecutionToolkit(sandbox="subprocess").get_tools(),
+]
+
+# 3. 传递给助手代理
+assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
+```
+
+选择必要的工具包可优化性能并减少资源使用。
+
 # 🌐 网页界面
 
 OWL 现在包含一个基于网页的用户界面,使与系统交互变得更加容易。要启动网页界面,请运行:
 
 ```bash
+# 中文版本
+python run_app_zh.py
+
+# 英文版本
 python run_app.py
 ```
 
@@ -314,10 +446,12 @@ python run_gaia_roleplaying.py
 
 # ⏱️ 未来计划
 
+我们正在不断努力改进 OWL。以下是我们的路线图:
 
+- [ ] 撰写技术博客,详细介绍我们在现实任务中多智能体协作方面的探索与见解
+- [ ] 通过引入更多针对特定领域任务的专业工具,进一步完善工具生态系统
+- [ ] 开发更复杂的智能体交互模式和通信协议
+- [ ] 提高复杂多步推理任务的性能
 
 # 📄 许可证
 
@@ -338,10 +472,27 @@ python run_gaia_roleplaying.py
 }
 ```
 
+# 🤝 贡献
+
+我们欢迎社区的贡献!以下是您可以提供帮助的方式:
+
+1. 阅读我们的[贡献指南](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md)
+2. 查看[开放的问题](https://github.com/camel-ai/camel/issues)或创建新的问题
+3. 提交包含您改进的拉取请求
+
+**当前开放贡献的问题:**
+- [#1770](https://github.com/camel-ai/camel/issues/1770)
+- [#1712](https://github.com/camel-ai/camel/issues/1712)
+- [#1537](https://github.com/camel-ai/camel/issues/1537)
+- [#1827](https://github.com/camel-ai/camel/issues/1827)
+
+要认领一个问题,只需在该问题下留言表明您的兴趣即可。
+
 # 🔥 社区
+加入我们的 ([*Discord*](https://discord.camel-ai.org/) 或 [*微信*](https://ghli.org/camel/wechat.png)) 社区,一起探索智能体扩展规律的边界。
+
 加入我们,参与更多讨论!
+
 <!--  -->
 
 # ❓ 常见问题
@@ -350,6 +501,30 @@ python run_gaia_roleplaying.py
 
 A: 当OWL判断某个任务可以使用非浏览器工具(如搜索、代码分析等)完成时,浏览器就不会启动。只有在判断需要使用浏览器工具的时候,本地才会弹出浏览器窗口,并进行浏览器模拟交互。
 
+**Q: 我应该使用哪个Python版本?**
+
+A: OWL支持Python 3.10、3.11和3.12。为了与所有依赖项获得最佳兼容性,我们推荐使用Python 3.10。
+
+**Q: 我如何为项目做贡献?**
+
+A: 请参阅我们的[贡献](#-贡献)部分,了解如何参与的详细信息。我们欢迎各种形式的贡献,从代码改进到文档更新。
+
+# 📚 探索 CAMEL 依赖
+
+OWL 是基于 [CAMEL](https://github.com/camel-ai/camel) 框架构建的,以下是如何探索 CAMEL 源代码并了解其与 OWL 的工作方式:
+
+## 访问 CAMEL 源代码
+
+```bash
+# 克隆 CAMEL 仓库
+git clone https://github.com/camel-ai/camel.git
+cd camel
+```
+
+# ⭐ Star History
+
+[](https://star-history.com/#camel-ai/owl&Date)
+
 [docs-image]: https://img.shields.io/badge/Documentation-EB3ECC
 [docs-url]: https://camel-ai.github.io/camel/index.html
 [star-image]: https://img.shields.io/github/stars/camel-ai/owl?label=stars&logo=github&color=brightgreen
community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# 🦉 OWL Community Call for Use Cases
|
2 |
+
# 🦉 OWL 社区用例征集令
|
3 |
+
|
4 |
+
<div align="center">
|
5 |
+
|
6 |
+
[![Documentation][docs-image]][docs-url]
|
7 |
+
[![Discord][discord-image]][discord-url]
|
8 |
+
[![X][x-image]][x-url]
|
9 |
+
[![Reddit][reddit-image]][reddit-url]
|
10 |
+
[![Wechat][wechat-image]][wechat-url]
|
11 |
+
[![Star][star-image]][star-url]
|
12 |
+
|
13 |
+
</div>
|
14 |
+
|
15 |
+
<div align="center">
|
16 |
+
<h4 align="center">
|
17 |
+
|
18 |
+
[English](#join-the-owl-community-contribute-your-use-cases) | [中文](#加入owl社区贡献您的用例)
|
19 |
+
|
20 |
+
</h4>
|
21 |
+
</div>
|
22 |
+
|
23 |
+
## Join the OWL Community: Contribute Your Use Cases!
|
24 |
+
|
25 |
+
Dear OWL Community,
|
26 |
+
|
27 |
+
We are excited to announce a special initiative to expand the capabilities and applications of the OWL framework! As the #1 ranked open-source multi-agent collaboration framework on the [GAIA benchmark](https://huggingface.co/spaces/gaia-benchmark/leaderboard), OWL is revolutionizing how AI agents collaborate to solve real-world tasks.
|
28 |
+
|
29 |
+
### 🌟 What We're Looking For
|
30 |
+
|
31 |
+
We invite you to contribute use cases that demonstrate the power and versatility of OWL in two ways:
|
32 |
+
|
33 |
+
1. **Leverage Existing Tools and Models**: Create innovative use cases using OWL's supported tools and models, then submit a PR to our repository.
|
34 |
+
2. **Extend OWL's Capabilities**: Develop new tools that expand OWL's functionality to implement your own unique use cases.
|
35 |
+
|
36 |
+
### 🏆 Community Rewards
|
37 |
+
|
38 |
+
The **top ten submissions** will receive:
|
39 |
+
- Special community gifts
|
40 |
+
- Featured promotion within the OWL community
|
41 |
+
- Recognition of your contributions and authorship
|
42 |
+
|
43 |
+
### 💡 Submission Guidelines
|
44 |
+
|
45 |
+
Your submission should include:
|
46 |
+
|
47 |
+
1. **Well-documented code**: Clear comments and instructions for running your use case
|
48 |
+
2. **Description file**: Explaining what your use case does and why it's valuable
|
49 |
+
3. **Requirements**: Any additional dependencies needed
|
50 |
+
4. **Example outputs**: Demonstrations of your use case in action
|
51 |
+
|
52 |
+
### 🔍 Evaluation Criteria
|
53 |
+
|
54 |
+
Submissions will be evaluated based on:
|
55 |
+
- **Innovation**: How creative and novel is your use case?
|
56 |
+
- **Utility**: How useful is it for real-world applications?
|
57 |
+
- **Implementation**: How well is it coded and documented?
|
58 |
+
- **Extensibility**: How easily can others build upon your work?
|
59 |
+
- **Community Engagement**: Sharing your use case on social media platforms (Zhihu, Xiaohongshu, X/Twitter, YouTube, etc.) will earn you extra points
|
60 |
+
|
61 |
+
### 📝 How to Submit
|
62 |
+
|
63 |
+
1. Fork the OWL repository
|
64 |
+
2. Create your use case in the `community_usecase/` directory
|
65 |
+
3. Submit a Pull Request with a detailed description of your contribution
|
66 |
+
4. Tag your PR with `community-use-case`
|
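A hypothetical layout for a submission is sketched below; the directory and file names are illustrative only (they are not mandated by the repository), and the entry script is a stub.

```python
# Hypothetical submission layout (illustrative, not mandated by the repository):
#
#   community_usecase/my_use_case/
#   ├── README.md            # description: what it does and why it is valuable
#   ├── requirements.txt     # any extra dependencies
#   ├── run_my_use_case.py   # well-commented entry script (this file)
#   └── outputs/             # example outputs or screenshots
"""Entry-point stub for a hypothetical community use case."""


def main() -> None:
    # Build your agent society here, e.g. by adapting one of the repo's
    # run_*.py scripts, then print or save the final answer.
    raise NotImplementedError("Replace with your use case logic.")


if __name__ == "__main__":
    main()
```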
67 |
+
|
68 |
+
### ⏰ Timeline
|
69 |
+
|
70 |
+
- Submission deadline: March 31, 2025
|
71 |
+
- Winners announcement: April 7, 2025
|
72 |
+
|
73 |
+
### 🚀 Inspiration Areas
|
74 |
+
|
75 |
+
Consider exploring use cases in:
|
76 |
+
- Data analysis and visualization
|
77 |
+
- Content creation and summarization
|
78 |
+
- Research assistance
|
79 |
+
- Educational tools
|
80 |
+
- Business process automation
|
81 |
+
- Creative applications
|
82 |
+
- Cross-modal interactions (text, image, audio, video)
|
83 |
+
|
84 |
+
### 🤝 Community Support
|
85 |
+
|
86 |
+
Need help or have questions? Join our community channels:
|
87 |
+
- [Discord](https://discord.gg/CNcNpquyDc)
|
88 |
+
- [GitHub Discussions](https://github.com/camel-ai/owl/discussions)
|
89 |
+
|
90 |
+
Let's build the future of multi-agent AI together!
|
91 |
+
|
92 |
+
---
|
93 |
+
|
94 |
+
## 加入OWL社区:贡献您的用例!
|
95 |
+
|
96 |
+
亲爱的OWL社区成员,
|
97 |
+
|
98 |
+
我们很高兴宣布一项特别计划,旨在扩展OWL框架的功能和应用!作为在[GAIA基准测试](https://huggingface.co/spaces/gaia-benchmark/leaderboard)中排名第一的开源多智能体协作框架,OWL正在彻底改变AI智能体协作解决现实任务的方式。
|
99 |
+
|
100 |
+
### 🌟 我们在寻找什么
|
101 |
+
|
102 |
+
我们邀请您通过以下两种方式贡献展示OWL强大功能和多样性的用例:
|
103 |
+
|
104 |
+
1. **利用现有工具和模型**:使用OWL支持的工具和模型创建创新用例,然后向我们的仓库提交PR。
|
105 |
+
2. **扩展OWL的功能**:开发新工具,扩展OWL的功能,实现您自己独特的用例。
|
106 |
+
|
107 |
+
### 🏆 社区奖励
|
108 |
+
|
109 |
+
**前十名**将获得:
|
110 |
+
- 特别社区礼物
|
111 |
+
- 在OWL社区内的推广展示
|
112 |
+
- 对您贡献和作者身份的认可
|
113 |
+
|
114 |
+
### 💡 提交指南
|
115 |
+
|
116 |
+
您的提交应包括:
|
117 |
+
|
118 |
+
1. **文档完善的代码**:清晰的注释和运行用例的说明
|
119 |
+
2. **描述文件**:解释您的用例做什么以及为什么它有价值
|
120 |
+
3. **依赖要求**:需要的任何额外依赖
|
121 |
+
4. **示例输出**:展示您的用例实际运行效果
|
122 |
+
|
123 |
+
### 🔍 评估标准
|
124 |
+
|
125 |
+
提交将基于以下标准进行评估:
|
126 |
+
- **创新性**:您的用例有多创新和新颖?
|
127 |
+
- **实用性**:它对现实世界应用有多大用处?
|
128 |
+
- **实现质量**:代码和文档的质量如何?
|
129 |
+
- **可扩展性**:其他人能多容易地在您的工作基础上进行扩展?
|
130 |
+
- **社区参与度**:在社交媒体平台(知乎、小红书、X/Twitter、YouTube等)分享您的用例将获得额外加分
|
131 |
+
|
132 |
+
### 📝 如何提交
|
133 |
+
|
134 |
+
1. Fork OWL仓库
|
135 |
+
2. 在`community_usecase/`目录中创建您的用例
|
136 |
+
3. 提交一个包含您贡献详细描述的Pull Request
|
137 |
+
使用`community-use-case`标签标记您的PR
|
138 |
+
|
139 |
+
### ⏰ 时间线
|
140 |
+
|
141 |
+
- 提交截止日期:2025年3月31日
|
142 |
+
- 获奖者公布:2025年4月7日
|
143 |
+
|
144 |
+
### 🚀 灵感领域
|
145 |
+
|
146 |
+
考虑探索以下领域的用例:
|
147 |
+
- 数据分析和可视化
|
148 |
+
- 内容创建和摘要
|
149 |
+
- 研究辅助
|
150 |
+
- 教育工具
|
151 |
+
- 业务流程自动化
|
152 |
+
- 创意应用
|
153 |
+
- 跨模态交互(文本、图像、音频、视频)
|
154 |
+
|
155 |
+
### 🤝 社区支持
|
156 |
+
|
157 |
+
需要帮助或有问题?加入我们的社区渠道:
|
158 |
+
- [Discord](https://discord.gg/CNcNpquyDc)
|
159 |
+
- [GitHub讨论](https://github.com/camel-ai/owl/discussions)
|
160 |
+
|
161 |
+
让我们一起构建多智能体AI的未来!
|
162 |
+
|
163 |
+
<!-- Links and badges -->
|
164 |
+
[docs-image]: https://img.shields.io/badge/docs-OWL-blue
|
165 |
+
[docs-url]: https://docs.camel-ai.org/
|
166 |
+
[discord-image]: https://img.shields.io/discord/1135106975706013747?color=7289da&label=Discord&logo=discord&logoColor=white
|
167 |
+
[discord-url]: https://discord.gg/CNcNpquyDc
|
168 |
+
[x-image]: https://img.shields.io/badge/Twitter-black?logo=x
|
169 |
+
[x-url]: https://twitter.com/CamelAIOrg
|
170 |
+
[reddit-image]: https://img.shields.io/badge/Reddit-FF4500?logo=reddit&logoColor=white
|
171 |
+
[reddit-url]: https://www.reddit.com/r/camelai/
|
172 |
+
[wechat-image]: https://img.shields.io/badge/WeChat-07C160?logo=wechat&logoColor=white
|
173 |
+
[wechat-url]: https://docs.camel-ai.org/blog/2023/11/29/camel-wechat/
|
174 |
+
[star-image]: https://img.shields.io/github/stars/camel-ai/owl?style=social
|
175 |
+
[star-url]: https://github.com/camel-ai/owl
|
owl/.env_template
CHANGED
@@ -1,8 +1,8 @@
|
|
1 |
-
# MODEL & API (See https://
|
2 |
|
3 |
# OPENAI API
|
4 |
-
OPENAI_API_KEY
|
5 |
-
# OPENAI_API_BASE_URL
|
6 |
|
7 |
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
|
8 |
# QWEN_API_KEY=""
|
@@ -26,3 +26,4 @@ CHUNKR_API_KEY=""
|
|
26 |
|
27 |
# Firecrawl API (https://www.firecrawl.dev/)
|
28 |
FIRECRAWL_API_KEY=""
|
|
|
|
1 |
+
# MODEL & API (See https://docs.camel-ai.org/key_modules/models.html#)
|
2 |
|
3 |
# OPENAI API
|
4 |
+
# OPENAI_API_KEY= ""
|
5 |
+
# OPENAI_API_BASE_URL=""
|
6 |
|
7 |
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
|
8 |
# QWEN_API_KEY=""
|
|
|
26 |
|
27 |
# Firecrawl API (https://www.firecrawl.dev/)
|
28 |
FIRECRAWL_API_KEY=""
|
29 |
+
#FIRECRAWL_API_URL="https://api.firecrawl.dev"
|
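These template variables are read at startup through python-dotenv, the same mechanism `owl/app.py` uses. A minimal sketch of checking that a key is present, assuming the template has been copied to `owl/.env` and the script is run from the `owl/` directory:

```python
# Minimal sketch (assumption: .env_template has been copied to owl/.env).
import os

import dotenv

dotenv.load_dotenv()  # loads .env from the working directory

if not os.environ.get("OPENAI_API_KEY"):
    print("OPENAI_API_KEY is not set; OpenAI-backed scripts will not run.")
```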
owl/app.py
CHANGED
@@ -25,7 +25,7 @@ import signal
|
|
25 |
import dotenv
|
26 |
|
27 |
# 设置日志队列
|
28 |
-
log_queue = queue.Queue()
|
29 |
|
30 |
# 当前运行的进程
|
31 |
current_process = None
|
@@ -39,6 +39,9 @@ SCRIPTS = {
|
|
39 |
"DeepSeek (中文)": "run_deepseek_zh.py",
|
40 |
"Default": "run.py",
|
41 |
"GAIA Roleplaying": "run_gaia_roleplaying.py",
|
|
|
|
|
|
|
42 |
}
|
43 |
|
44 |
# 脚本描述
|
@@ -49,6 +52,9 @@ SCRIPT_DESCRIPTIONS = {
|
|
49 |
"DeepSeek (中文)": "使用DeepSeek模型,适合非多模态任务",
|
50 |
"Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具",
|
51 |
"GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
|
|
|
|
|
|
|
52 |
}
|
53 |
|
54 |
# 环境变量分组
|
@@ -144,33 +150,45 @@ def load_env_vars():
|
|
144 |
|
145 |
# 加载.env文件中可能存在的其他环境变量
|
146 |
if Path(".env").exists():
|
147 |
-
|
148 |
-
|
149 |
-
line
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
"name"
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
|
174 |
|
175 |
return env_vars
|
176 |
|
@@ -182,30 +200,49 @@ def save_env_vars(env_vars):
|
|
182 |
existing_content = {}
|
183 |
|
184 |
if env_path.exists():
|
185 |
-
|
186 |
-
|
187 |
-
line
|
188 |
-
|
189 |
-
|
190 |
-
|
|
|
191 |
|
192 |
# 更新环境变量
|
193 |
for key, value in env_vars.items():
|
194 |
-
if value: #
|
195 |
-
#
|
196 |
value = str(value) # 确保值是字符串
|
197 |
-
|
|
|
|
|
198 |
value.startswith("'") and value.endswith("'")
|
199 |
):
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
|
205 |
# 写入.env文件
|
206 |
-
|
207 |
-
|
208 |
-
|
|
|
|
|
|
|
|
|
209 |
|
210 |
return "✅ 环境变量已保存"
|
211 |
|
@@ -239,27 +276,128 @@ def add_custom_env_var(name, value, var_type):
|
|
239 |
return f"✅ 已添加环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
240 |
|
241 |
|
242 |
def terminate_process():
|
243 |
"""终止当前运行的进程"""
|
244 |
global current_process
|
245 |
|
246 |
with process_lock:
|
247 |
if current_process is not None and current_process.poll() is None:
|
248 |
-
# 在Windows上使用CTRL_BREAK_EVENT,在Unix上使用SIGTERM
|
249 |
-
if os.name == "nt":
|
250 |
-
current_process.send_signal(signal.CTRL_BREAK_EVENT)
|
251 |
-
else:
|
252 |
-
current_process.terminate()
|
253 |
-
|
254 |
-
# 等待进程终止
|
255 |
try:
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
else:
|
264 |
return "❌ 没有正在运行的进程"
|
265 |
|
@@ -288,14 +426,21 @@ def run_script(script_dropdown, question, progress=gr.Progress()):
|
|
288 |
log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
|
289 |
|
290 |
# 构建命令
|
|
|
|
|
|
|
291 |
cmd = [
|
292 |
sys.executable,
|
293 |
-
os.path.join("owl", "script_adapter.py"),
|
294 |
-
os.path.join("owl", script_name),
|
295 |
]
|
296 |
|
297 |
# 创建环境变量副本并添加问题
|
298 |
env = os.environ.copy()
|
|
|
|
|
|
|
|
|
299 |
env["OWL_QUESTION"] = question
|
300 |
|
301 |
# 启动进程
|
@@ -307,12 +452,24 @@ def run_script(script_dropdown, question, progress=gr.Progress()):
|
|
307 |
text=True,
|
308 |
bufsize=1,
|
309 |
env=env,
|
|
|
310 |
)
|
311 |
|
312 |
# 创建线程来读取输出
|
313 |
def read_output():
|
314 |
try:
|
315 |
-
|
316 |
for line in iter(current_process.stdout.readline, ""):
|
317 |
if line:
|
318 |
# 写入日志文件
|
@@ -456,12 +613,12 @@ def create_ui():
|
|
456 |
gr.Markdown(
|
457 |
"""
|
458 |
# 🦉 OWL 智能助手运行平台
|
459 |
-
|
460 |
选择一个模型并输入您的问题,系统将运行对应的脚本并显示结果。
|
461 |
"""
|
462 |
)
|
463 |
|
464 |
-
with gr.Tabs()
|
465 |
with gr.TabItem("运行模式"):
|
466 |
with gr.Row():
|
467 |
with gr.Column(scale=1):
|
@@ -488,12 +645,17 @@ def create_ui():
|
|
488 |
)
|
489 |
|
490 |
question_input = gr.Textbox(
|
491 |
-
lines=
|
|
|
|
|
|
|
|
|
492 |
)
|
493 |
|
494 |
gr.Markdown(
|
495 |
"""
|
496 |
> **注意**: 您输入的问题将替换脚本中的默认问题。系统会自动处理问题的替换,确保您的问题被正确使用。
|
|
|
497 |
"""
|
498 |
)
|
499 |
|
@@ -559,12 +721,72 @@ def create_ui():
|
|
559 |
visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
|
560 |
)
|
561 |
|
562 |
-
|
563 |
-
|
564 |
-
|
565 |
-
|
566 |
-
|
567 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
568 |
|
569 |
# 现有环境变量配置
|
570 |
for group_name, vars in ENV_GROUPS.items():
|
@@ -641,7 +863,7 @@ def create_ui():
|
|
641 |
gr.Markdown(
|
642 |
"""
|
643 |
### 📝 使用说明
|
644 |
-
|
645 |
- 选择一个模型并输入您的问题
|
646 |
- 点击"运行"按钮开始执行
|
647 |
- 如需终止运行,点击"终止"按钮
|
@@ -650,9 +872,9 @@ def create_ui():
|
|
650 |
- 在"聊天历史"标签页查看对话历史(如果有)
|
651 |
- 在"环境变量配置"标签页配置API密钥和其他环境变量
|
652 |
- 您可以添加自定义环境变量,满足特殊需求
|
653 |
-
|
654 |
### ⚠️ 注意事项
|
655 |
-
|
656 |
- 运行某些模型可能需要API密钥,请确保在"环境变量配置"标签页中设置了相应的环境变量
|
657 |
- 某些脚本可能需要较长时间运行,请耐心等待
|
658 |
- 如果运行超过30分钟,进程将自动终止
|
|
|
25 |
import dotenv
|
26 |
|
27 |
# 设置日志队列
|
28 |
+
log_queue: queue.Queue[str] = queue.Queue()
|
29 |
|
30 |
# 当前运行的进程
|
31 |
current_process = None
|
|
|
39 |
"DeepSeek (中文)": "run_deepseek_zh.py",
|
40 |
"Default": "run.py",
|
41 |
"GAIA Roleplaying": "run_gaia_roleplaying.py",
|
42 |
+
"OpenAI Compatible": "run_openai_compatiable_model.py",
|
43 |
+
"Ollama": "run_ollama.py",
|
44 |
+
"Terminal": "run_terminal_zh.py",
|
45 |
}
|
46 |
|
47 |
# 脚本描述
|
|
|
52 |
"DeepSeek (中文)": "使用DeepSeek模型,适合非多模态任务",
|
53 |
"Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具",
|
54 |
"GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
|
55 |
+
"OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点",
|
56 |
+
"Ollama": "使用Ollama API",
|
57 |
+
"Terminal": "使用本地终端执行python文件",
|
58 |
}
|
59 |
|
60 |
# 环境变量分组
|
|
|
150 |
|
151 |
# 加载.env文件中可能存在的其他环境变量
|
152 |
if Path(".env").exists():
|
153 |
+
try:
|
154 |
+
with open(".env", "r", encoding="utf-8") as f:
|
155 |
+
for line in f:
|
156 |
+
line = line.strip()
|
157 |
+
if line and not line.startswith("#") and "=" in line:
|
158 |
+
try:
|
159 |
+
key, value = line.split("=", 1)
|
160 |
+
key = key.strip()
|
161 |
+
value = value.strip()
|
162 |
+
|
163 |
+
# 处理引号包裹的值
|
164 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
165 |
+
value.startswith("'") and value.endswith("'")
|
166 |
+
):
|
167 |
+
value = value[1:-1] # 移除首尾的引号
|
168 |
+
|
169 |
+
# 检查是否是已知的环境变量
|
170 |
+
known_var = False
|
171 |
+
for group in ENV_GROUPS.values():
|
172 |
+
if any(var["name"] == key for var in group):
|
173 |
+
known_var = True
|
174 |
+
break
|
175 |
+
|
176 |
+
# 如果不是已知的环境变量,添加到自定义环境变量组
|
177 |
+
if not known_var and key not in env_vars:
|
178 |
+
ENV_GROUPS["自定义环境变量"].append(
|
179 |
+
{
|
180 |
+
"name": key,
|
181 |
+
"label": key,
|
182 |
+
"type": "text",
|
183 |
+
"required": False,
|
184 |
+
"help": "用户自定义环境变量",
|
185 |
+
}
|
186 |
+
)
|
187 |
+
env_vars[key] = value
|
188 |
+
except Exception as e:
|
189 |
+
print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
|
190 |
+
except Exception as e:
|
191 |
+
print(f"加载.env文件时出错: {str(e)}")
|
192 |
|
193 |
return env_vars
|
194 |
|
|
|
200 |
existing_content = {}
|
201 |
|
202 |
if env_path.exists():
|
203 |
+
try:
|
204 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
205 |
+
for line in f:
|
206 |
+
line = line.strip()
|
207 |
+
if line and not line.startswith("#") and "=" in line:
|
208 |
+
try:
|
209 |
+
key, value = line.split("=", 1)
|
210 |
+
existing_content[key.strip()] = value.strip()
|
211 |
+
except Exception as e:
|
212 |
+
print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
|
213 |
+
except Exception as e:
|
214 |
+
print(f"读取.env文件时出错: {str(e)}")
|
215 |
|
216 |
# 更新环境变量
|
217 |
for key, value in env_vars.items():
|
218 |
+
if value is not None: # 允许空字符串值,但不允许None
|
219 |
+
# 确保值是字符串形式
|
220 |
value = str(value) # 确保值是字符串
|
221 |
+
|
222 |
+
# 检查值是否已经被引号包裹
|
223 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
224 |
value.startswith("'") and value.endswith("'")
|
225 |
):
|
226 |
+
# 已经被引号包裹,保持原样
|
227 |
+
existing_content[key] = value
|
228 |
+
# 更新环境变量时移除引号
|
229 |
+
os.environ[key] = value[1:-1]
|
230 |
+
else:
|
231 |
+
# 没有被引号包裹,添加双引号
|
232 |
+
# 用双引号包裹值,确保特殊字符被正确处理
|
233 |
+
quoted_value = f'"{value}"'
|
234 |
+
existing_content[key] = quoted_value
|
235 |
+
# 同时更新当前进程的环境变量(使用未引用的值)
|
236 |
+
os.environ[key] = value
|
237 |
|
238 |
# 写入.env文件
|
239 |
+
try:
|
240 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
241 |
+
for key, value in existing_content.items():
|
242 |
+
f.write(f"{key}={value}\n")
|
243 |
+
except Exception as e:
|
244 |
+
print(f"写入.env文件时出错: {str(e)}")
|
245 |
+
return f"❌ 保存环境变量失败: {str(e)}"
|
246 |
|
247 |
return "✅ 环境变量已保存"
|
248 |
|
|
|
276 |
return f"✅ 已添加环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
277 |
|
278 |
|
279 |
+
def update_custom_env_var(name, value, var_type):
|
280 |
+
"""更改自定义环境变量"""
|
281 |
+
if not name:
|
282 |
+
return "❌ 环境变量名不能为空", None
|
283 |
+
|
284 |
+
# 检查环境变量是否存在于自定义环境变量组中
|
285 |
+
found = False
|
286 |
+
for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
|
287 |
+
if var["name"] == name:
|
288 |
+
# 更新类型
|
289 |
+
ENV_GROUPS["自定义环境变量"][i]["type"] = var_type
|
290 |
+
found = True
|
291 |
+
break
|
292 |
+
|
293 |
+
if not found:
|
294 |
+
return f"❌ 自定义环境变量 {name} 不存在", None
|
295 |
+
|
296 |
+
# 保存环境变量值
|
297 |
+
env_vars = {name: value}
|
298 |
+
save_env_vars(env_vars)
|
299 |
+
|
300 |
+
# 返回成功消息和更新后的环境变量组
|
301 |
+
return f"✅ 已更新环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
302 |
+
|
303 |
+
|
304 |
+
def delete_custom_env_var(name):
|
305 |
+
"""删除自定义环境变量"""
|
306 |
+
if not name:
|
307 |
+
return "❌ 环境变量名不能为空", None
|
308 |
+
|
309 |
+
# 检查环境变量是否存在于自定义环境变量组中
|
310 |
+
found = False
|
311 |
+
for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
|
312 |
+
if var["name"] == name:
|
313 |
+
# 从自定义环境变量组中删除
|
314 |
+
del ENV_GROUPS["自定义环境变量"][i]
|
315 |
+
found = True
|
316 |
+
break
|
317 |
+
|
318 |
+
if not found:
|
319 |
+
return f"❌ 自定义环境变量 {name} 不存在", None
|
320 |
+
|
321 |
+
# 从.env文件中删除该环境变量
|
322 |
+
env_path = Path(".env")
|
323 |
+
if env_path.exists():
|
324 |
+
try:
|
325 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
326 |
+
lines = f.readlines()
|
327 |
+
|
328 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
329 |
+
for line in lines:
|
330 |
+
try:
|
331 |
+
# 更精确地匹配环境变量行
|
332 |
+
line_stripped = line.strip()
|
333 |
+
# 检查是否为注释行或空行
|
334 |
+
if not line_stripped or line_stripped.startswith("#"):
|
335 |
+
f.write(line) # 保留注释行和空行
|
336 |
+
continue
|
337 |
+
|
338 |
+
# 检查是否包含等号
|
339 |
+
if "=" not in line_stripped:
|
340 |
+
f.write(line) # 保留不包含等号的行
|
341 |
+
continue
|
342 |
+
|
343 |
+
# 提取变量名并检查是否与要删除的变量匹配
|
344 |
+
var_name = line_stripped.split("=", 1)[0].strip()
|
345 |
+
if var_name != name:
|
346 |
+
f.write(line) # 保留不匹配的变量
|
347 |
+
except Exception as e:
|
348 |
+
print(f"处理.env文件行时出错: {line}, 错误: {str(e)}")
|
349 |
+
# 出错时保留原行
|
350 |
+
f.write(line)
|
351 |
+
except Exception as e:
|
352 |
+
print(f"删除环境变量时出错: {str(e)}")
|
353 |
+
return f"❌ 删除环境变量失败: {str(e)}", None
|
354 |
+
|
355 |
+
# 从当前进程的环境变量中删除
|
356 |
+
if name in os.environ:
|
357 |
+
del os.environ[name]
|
358 |
+
|
359 |
+
# 返回成功消息和更新后的环境变量组
|
360 |
+
return f"✅ 已删除环境变量 {name}", ENV_GROUPS["自定义环境变量"]
|
361 |
+
|
362 |
+
|
363 |
def terminate_process():
|
364 |
"""终止当前运行的进程"""
|
365 |
global current_process
|
366 |
|
367 |
with process_lock:
|
368 |
if current_process is not None and current_process.poll() is None:
|
369 |
try:
|
370 |
+
# 在Windows上使用taskkill强制终止进程树
|
371 |
+
if os.name == "nt":
|
372 |
+
# 获取进程ID
|
373 |
+
pid = current_process.pid
|
374 |
+
# 使用taskkill命令终止进程及其子进程 - 避免使用shell=True以提高安全性
|
375 |
+
try:
|
376 |
+
subprocess.run(
|
377 |
+
["taskkill", "/F", "/T", "/PID", str(pid)], check=False
|
378 |
+
)
|
379 |
+
except subprocess.SubprocessError as e:
|
380 |
+
log_queue.put(f"终止进程时出错: {str(e)}\n")
|
381 |
+
return f"❌ 终止进程时出错: {str(e)}"
|
382 |
+
else:
|
383 |
+
# 在Unix上使用SIGTERM和SIGKILL
|
384 |
+
current_process.terminate()
|
385 |
+
try:
|
386 |
+
current_process.wait(timeout=3)
|
387 |
+
except subprocess.TimeoutExpired:
|
388 |
+
current_process.kill()
|
389 |
+
|
390 |
+
# 等待进程终止
|
391 |
+
try:
|
392 |
+
current_process.wait(timeout=2)
|
393 |
+
except subprocess.TimeoutExpired:
|
394 |
+
pass # 已经尝试强制终止,忽略超时
|
395 |
+
|
396 |
+
log_queue.put("进程已终止\n")
|
397 |
+
return "✅ 进程已终止"
|
398 |
+
except Exception as e:
|
399 |
+
log_queue.put(f"终止进程时出错: {str(e)}\n")
|
400 |
+
return f"❌ 终止进程时出错: {str(e)}"
|
401 |
else:
|
402 |
return "❌ 没有正在运行的进程"
|
403 |
|
|
|
426 |
log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
|
427 |
|
428 |
# 构建命令
|
429 |
+
# 获取当前脚本所在的基础路径
|
430 |
+
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
431 |
+
|
432 |
cmd = [
|
433 |
sys.executable,
|
434 |
+
os.path.join(base_path, "owl", "script_adapter.py"),
|
435 |
+
os.path.join(base_path, "owl", script_name),
|
436 |
]
|
437 |
|
438 |
# 创建环境变量副本并添加问题
|
439 |
env = os.environ.copy()
|
440 |
+
# 确保问题是字符串类型
|
441 |
+
if not isinstance(question, str):
|
442 |
+
question = str(question)
|
443 |
+
# 保留换行符,但确保是有效的字符串
|
444 |
env["OWL_QUESTION"] = question
|
445 |
|
446 |
# 启动进程
|
|
|
452 |
text=True,
|
453 |
bufsize=1,
|
454 |
env=env,
|
455 |
+
encoding="utf-8",
|
456 |
)
|
457 |
|
458 |
# 创建线程来读取输出
|
459 |
def read_output():
|
460 |
try:
|
461 |
+
# 使用唯一的时间戳确保日志文件名不重复
|
462 |
+
timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
|
463 |
+
unique_log_file = (
|
464 |
+
log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
|
465 |
+
)
|
466 |
+
|
467 |
+
# 使用这个唯一的文件名写入日志
|
468 |
+
with open(unique_log_file, "w", encoding="utf-8") as f:
|
469 |
+
# 更新全局日志文件路径
|
470 |
+
nonlocal log_file
|
471 |
+
log_file = unique_log_file
|
472 |
+
|
473 |
for line in iter(current_process.stdout.readline, ""):
|
474 |
if line:
|
475 |
# 写入日志文件
|
|
|
613 |
gr.Markdown(
|
614 |
"""
|
615 |
# 🦉 OWL 智能助手运行平台
|
616 |
+
|
617 |
选择一个模型并输入您的问题,系统将运行对应的脚本并显示结果。
|
618 |
"""
|
619 |
)
|
620 |
|
621 |
+
with gr.Tabs():
|
622 |
with gr.TabItem("运行模式"):
|
623 |
with gr.Row():
|
624 |
with gr.Column(scale=1):
|
|
|
645 |
)
|
646 |
|
647 |
question_input = gr.Textbox(
|
648 |
+
lines=8,
|
649 |
+
placeholder="请输入您的问题...",
|
650 |
+
label="问题",
|
651 |
+
elem_id="question_input",
|
652 |
+
show_copy_button=True,
|
653 |
)
|
654 |
|
655 |
gr.Markdown(
|
656 |
"""
|
657 |
> **注意**: 您输入的问题将替换脚本中的默认问题。系统会自动处理问题的替换,确保您的问题被正确使用。
|
658 |
+
> 支持多行输入,换行将被保留。
|
659 |
"""
|
660 |
)
|
661 |
|
|
|
721 |
visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
|
722 |
)
|
723 |
|
724 |
+
# 更改和删除自定义环境变量部分
|
725 |
+
with gr.Accordion(
|
726 |
+
"更改或删除自定义环境变量",
|
727 |
+
open=True,
|
728 |
+
visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
|
729 |
+
) as update_delete_accordion:
|
730 |
+
with gr.Row():
|
731 |
+
# 创建下拉菜单,显示所有自定义环境变量
|
732 |
+
custom_var_dropdown = gr.Dropdown(
|
733 |
+
choices=[
|
734 |
+
var["name"] for var in ENV_GROUPS["自定义环境变量"]
|
735 |
+
],
|
736 |
+
label="选择环境变量",
|
737 |
+
interactive=True,
|
738 |
+
)
|
739 |
+
update_var_value = gr.Textbox(
|
740 |
+
label="新的环境变量值", placeholder="输入新值"
|
741 |
+
)
|
742 |
+
update_var_type = gr.Dropdown(
|
743 |
+
choices=["text", "password"], value="text", label="类型"
|
744 |
+
)
|
745 |
+
|
746 |
+
with gr.Row():
|
747 |
+
update_var_button = gr.Button("更新环境变量", variant="primary")
|
748 |
+
delete_var_button = gr.Button("删除环境变量", variant="stop")
|
749 |
+
|
750 |
+
update_var_status = gr.Textbox(label="操作状态", interactive=False)
|
751 |
+
|
752 |
+
# 添加环境变量按钮点击事件
|
753 |
+
add_var_button.click(
|
754 |
+
fn=add_custom_env_var,
|
755 |
+
inputs=[new_var_name, new_var_value, new_var_type],
|
756 |
+
outputs=[add_var_status, custom_vars_list],
|
757 |
+
).then(
|
758 |
+
fn=lambda vars: {"visible": len(vars) > 0},
|
759 |
+
inputs=[custom_vars_list],
|
760 |
+
outputs=[update_delete_accordion],
|
761 |
+
)
|
762 |
+
|
763 |
+
# 更新环境变量按钮点击事件
|
764 |
+
update_var_button.click(
|
765 |
+
fn=update_custom_env_var,
|
766 |
+
inputs=[custom_var_dropdown, update_var_value, update_var_type],
|
767 |
+
outputs=[update_var_status, custom_vars_list],
|
768 |
+
)
|
769 |
+
|
770 |
+
# 删除环境变量按钮点击事件
|
771 |
+
delete_var_button.click(
|
772 |
+
fn=delete_custom_env_var,
|
773 |
+
inputs=[custom_var_dropdown],
|
774 |
+
outputs=[update_var_status, custom_vars_list],
|
775 |
+
).then(
|
776 |
+
fn=lambda vars: {"visible": len(vars) > 0},
|
777 |
+
inputs=[custom_vars_list],
|
778 |
+
outputs=[update_delete_accordion],
|
779 |
+
)
|
780 |
+
|
781 |
+
# 当自定义环境变量列表更新时,更新下拉菜单选项
|
782 |
+
custom_vars_list.change(
|
783 |
+
fn=lambda vars: {
|
784 |
+
"choices": [var["name"] for var in vars],
|
785 |
+
"value": None,
|
786 |
+
},
|
787 |
+
inputs=[custom_vars_list],
|
788 |
+
outputs=[custom_var_dropdown],
|
789 |
+
)
|
790 |
|
791 |
# 现有环境变量配置
|
792 |
for group_name, vars in ENV_GROUPS.items():
|
|
|
863 |
gr.Markdown(
|
864 |
"""
|
865 |
### 📝 使用说明
|
866 |
+
|
867 |
- 选择一个模型并输入您的问题
|
868 |
- 点击"运行"按钮开始执行
|
869 |
- 如需终止运行,点击"终止"按钮
|
|
|
872 |
- 在"聊天历史"标签页查看对话历史(如果有)
|
873 |
- 在"环境变量配置"标签页配置API密钥和其他环境变量
|
874 |
- 您可以添加自定义环境变量,满足特殊需求
|
875 |
+
|
876 |
### ⚠️ 注意事项
|
877 |
+
|
878 |
- 运行某些模型可能需要API密钥,请确保在"环境变量配置"标签页中设置了相应的环境变量
|
879 |
- 某些脚本可能需要较长时间运行,请耐心等待
|
880 |
- 如果运行超过30分钟,进程将自动终止
|
owl/app_en.py
ADDED
@@ -0,0 +1,918 @@
|
1 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
14 |
+
import os
|
15 |
+
import sys
|
16 |
+
import gradio as gr
|
17 |
+
import subprocess
|
18 |
+
import threading
|
19 |
+
import time
|
20 |
+
from datetime import datetime
|
21 |
+
import queue
|
22 |
+
from pathlib import Path
|
23 |
+
import json
|
24 |
+
import signal
|
25 |
+
import dotenv
|
26 |
+
|
27 |
+
# Set up log queue
|
28 |
+
log_queue: queue.Queue[str] = queue.Queue()
|
29 |
+
|
30 |
+
# Currently running process
|
31 |
+
current_process = None
|
32 |
+
process_lock = threading.Lock()
|
33 |
+
|
34 |
+
# Script options
|
35 |
+
SCRIPTS = {
|
36 |
+
"Qwen Mini (Chinese)": "run_qwen_mini_zh.py",
|
37 |
+
"Qwen (Chinese)": "run_qwen_zh.py",
|
38 |
+
"Mini": "run_mini.py",
|
39 |
+
"DeepSeek (Chinese)": "run_deepseek_zh.py",
|
40 |
+
"Default": "run.py",
|
41 |
+
"GAIA Roleplaying": "run_gaia_roleplaying.py",
|
42 |
+
"OpenAI Compatible": "run_openai_compatiable_model.py",
|
43 |
+
"Ollama": "run_ollama.py",
|
44 |
+
"Terminal": "run_terminal.py",
|
45 |
+
}
|
46 |
+
|
47 |
+
# Script descriptions
|
48 |
+
SCRIPT_DESCRIPTIONS = {
|
49 |
+
"Qwen Mini (Chinese)": "Uses the Chinese version of Alibaba Cloud's Qwen model, suitable for Chinese Q&A and tasks",
|
50 |
+
"Qwen (Chinese)": "Uses Alibaba Cloud's Qwen model, supports various tools and functions",
|
51 |
+
"Mini": "Lightweight version, uses OpenAI GPT-4o model",
|
52 |
+
"DeepSeek (Chinese)": "Uses DeepSeek model, suitable for non-multimodal tasks",
|
53 |
+
"Default": "Default OWL implementation, uses OpenAI GPT-4o model and full set of tools",
|
54 |
+
"GAIA Roleplaying": "GAIA benchmark implementation, used to evaluate model capabilities",
|
55 |
+
"OpenAI Compatible": "Uses third-party models compatible with OpenAI API, supports custom API endpoints",
|
56 |
+
"Ollama": "Uses Ollama API",
|
57 |
+
"Terminal": "Uses local terminal to execute python files",
|
58 |
+
}
|
59 |
+
|
60 |
+
# Environment variable groups
|
61 |
+
ENV_GROUPS = {
|
62 |
+
"Model API": [
|
63 |
+
{
|
64 |
+
"name": "OPENAI_API_KEY",
|
65 |
+
"label": "OpenAI API Key",
|
66 |
+
"type": "password",
|
67 |
+
"required": False,
|
68 |
+
"help": "OpenAI API key for accessing GPT models. Get it from: https://platform.openai.com/api-keys",
|
69 |
+
},
|
70 |
+
{
|
71 |
+
"name": "OPENAI_API_BASE_URL",
|
72 |
+
"label": "OpenAI API Base URL",
|
73 |
+
"type": "text",
|
74 |
+
"required": False,
|
75 |
+
"help": "Base URL for OpenAI API, optional. Set this if using a proxy or custom endpoint.",
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"name": "QWEN_API_KEY",
|
79 |
+
"label": "Alibaba Cloud Qwen API Key",
|
80 |
+
"type": "password",
|
81 |
+
"required": False,
|
82 |
+
"help": "Alibaba Cloud Qwen API key for accessing Qwen models. Get it from: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
|
83 |
+
},
|
84 |
+
{
|
85 |
+
"name": "DEEPSEEK_API_KEY",
|
86 |
+
"label": "DeepSeek API Key",
|
87 |
+
"type": "password",
|
88 |
+
"required": False,
|
89 |
+
"help": "DeepSeek API key for accessing DeepSeek models. Get it from: https://platform.deepseek.com/api_keys",
|
90 |
+
},
|
91 |
+
],
|
92 |
+
"Search Tools": [
|
93 |
+
{
|
94 |
+
"name": "GOOGLE_API_KEY",
|
95 |
+
"label": "Google API Key",
|
96 |
+
"type": "password",
|
97 |
+
"required": False,
|
98 |
+
"help": "Google Search API key for web search functionality. Get it from: https://developers.google.com/custom-search/v1/overview",
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"name": "SEARCH_ENGINE_ID",
|
102 |
+
"label": "Search Engine ID",
|
103 |
+
"type": "text",
|
104 |
+
"required": False,
|
105 |
+
"help": "Google Custom Search Engine ID, used with Google API key. Get it from: https://developers.google.com/custom-search/v1/overview",
|
106 |
+
},
|
107 |
+
],
|
108 |
+
"Other Tools": [
|
109 |
+
{
|
110 |
+
"name": "HF_TOKEN",
|
111 |
+
"label": "Hugging Face Token",
|
112 |
+
"type": "password",
|
113 |
+
"required": False,
|
114 |
+
"help": "Hugging Face API token for accessing Hugging Face models and datasets. Get it from: https://huggingface.co/join",
|
115 |
+
},
|
116 |
+
{
|
117 |
+
"name": "CHUNKR_API_KEY",
|
118 |
+
"label": "Chunkr API Key",
|
119 |
+
"type": "password",
|
120 |
+
"required": False,
|
121 |
+
"help": "Chunkr API key for document processing functionality. Get it from: https://chunkr.ai/",
|
122 |
+
},
|
123 |
+
{
|
124 |
+
"name": "FIRECRAWL_API_KEY",
|
125 |
+
"label": "Firecrawl API Key",
|
126 |
+
"type": "password",
|
127 |
+
"required": False,
|
128 |
+
"help": "Firecrawl API key for web crawling functionality. Get it from: https://www.firecrawl.dev/",
|
129 |
+
},
|
130 |
+
],
|
131 |
+
"Custom Environment Variables": [], # User-defined environment variables will be stored here
|
132 |
+
}
|
133 |
+
|
134 |
+
|
135 |
+
def get_script_info(script_name):
|
136 |
+
"""Get detailed information about the script"""
|
137 |
+
return SCRIPT_DESCRIPTIONS.get(script_name, "No description available")
|
138 |
+
|
139 |
+
|
140 |
+
def load_env_vars():
|
141 |
+
"""Load environment variables"""
|
142 |
+
env_vars = {}
|
143 |
+
# Try to load from .env file
|
144 |
+
dotenv.load_dotenv()
|
145 |
+
|
146 |
+
# Get all environment variables
|
147 |
+
for group in ENV_GROUPS.values():
|
148 |
+
for var in group:
|
149 |
+
env_vars[var["name"]] = os.environ.get(var["name"], "")
|
150 |
+
|
151 |
+
# Load other environment variables that may exist in the .env file
|
152 |
+
if Path(".env").exists():
|
153 |
+
try:
|
154 |
+
with open(".env", "r", encoding="utf-8") as f:
|
155 |
+
for line in f:
|
156 |
+
line = line.strip()
|
157 |
+
if line and not line.startswith("#") and "=" in line:
|
158 |
+
try:
|
159 |
+
key, value = line.split("=", 1)
|
160 |
+
key = key.strip()
|
161 |
+
value = value.strip()
|
162 |
+
|
163 |
+
# Handle quoted values
|
164 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
165 |
+
value.startswith("'") and value.endswith("'")
|
166 |
+
):
|
167 |
+
value = value[
|
168 |
+
1:-1
|
169 |
+
] # Remove quotes at the beginning and end
|
170 |
+
|
171 |
+
# Check if it's a known environment variable
|
172 |
+
known_var = False
|
173 |
+
for group in ENV_GROUPS.values():
|
174 |
+
if any(var["name"] == key for var in group):
|
175 |
+
known_var = True
|
176 |
+
break
|
177 |
+
|
178 |
+
# If it's not a known environment variable, add it to the custom environment variables group
|
179 |
+
if not known_var and key not in env_vars:
|
180 |
+
ENV_GROUPS["Custom Environment Variables"].append(
|
181 |
+
{
|
182 |
+
"name": key,
|
183 |
+
"label": key,
|
184 |
+
"type": "text",
|
185 |
+
"required": False,
|
186 |
+
"help": "User-defined environment variable",
|
187 |
+
}
|
188 |
+
)
|
189 |
+
env_vars[key] = value
|
190 |
+
except Exception as e:
|
191 |
+
print(
|
192 |
+
f"Error parsing environment variable line: {line}, error: {str(e)}"
|
193 |
+
)
|
194 |
+
except Exception as e:
|
195 |
+
print(f"Error loading .env file: {str(e)}")
|
196 |
+
|
197 |
+
return env_vars
|
198 |
+
|
199 |
+
|
200 |
+
def save_env_vars(env_vars):
|
201 |
+
"""Save environment variables to .env file"""
|
202 |
+
# Read existing .env file content
|
203 |
+
env_path = Path(".env")
|
204 |
+
existing_content = {}
|
205 |
+
|
206 |
+
if env_path.exists():
|
207 |
+
try:
|
208 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
209 |
+
for line in f:
|
210 |
+
line = line.strip()
|
211 |
+
if line and not line.startswith("#") and "=" in line:
|
212 |
+
try:
|
213 |
+
key, value = line.split("=", 1)
|
214 |
+
existing_content[key.strip()] = value.strip()
|
215 |
+
except Exception as e:
|
216 |
+
print(
|
217 |
+
f"Error parsing environment variable line: {line}, error: {str(e)}"
|
218 |
+
)
|
219 |
+
except Exception as e:
|
220 |
+
print(f"Error reading .env file: {str(e)}")
|
221 |
+
|
222 |
+
# Update environment variables
|
223 |
+
for key, value in env_vars.items():
|
224 |
+
if value is not None: # Allow empty string values, but not None
|
225 |
+
# Ensure the value is a string
|
226 |
+
value = str(value) # Ensure the value is a string
|
227 |
+
|
228 |
+
# Check if the value is already wrapped in quotes
|
229 |
+
if (value.startswith('"') and value.endswith('"')) or (
|
230 |
+
value.startswith("'") and value.endswith("'")
|
231 |
+
):
|
232 |
+
# Already wrapped in quotes, keep as is
|
233 |
+
existing_content[key] = value
|
234 |
+
# Update environment variable by removing quotes
|
235 |
+
os.environ[key] = value[1:-1]
|
236 |
+
else:
|
237 |
+
# Not wrapped in quotes, add double quotes
|
238 |
+
# Wrap the value in double quotes to ensure special characters are handled correctly
|
239 |
+
quoted_value = f'"{value}"'
|
240 |
+
existing_content[key] = quoted_value
|
241 |
+
# Also update the environment variable for the current process (using the unquoted value)
|
242 |
+
os.environ[key] = value
|
243 |
+
|
244 |
+
# Write to .env file
|
245 |
+
try:
|
246 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
247 |
+
for key, value in existing_content.items():
|
248 |
+
f.write(f"{key}={value}\n")
|
249 |
+
except Exception as e:
|
250 |
+
print(f"Error writing to .env file: {str(e)}")
|
251 |
+
return f"❌ Failed to save environment variables: {str(e)}"
|
252 |
+
|
253 |
+
return "✅ Environment variables saved"
|
254 |
+
|
255 |
+
|
256 |
+
def add_custom_env_var(name, value, var_type):
|
257 |
+
"""Add custom environment variable"""
|
258 |
+
if not name:
|
259 |
+
return "❌ Environment variable name cannot be empty", None
|
260 |
+
|
261 |
+
# Check if an environment variable with the same name already exists
|
262 |
+
for group in ENV_GROUPS.values():
|
263 |
+
if any(var["name"] == name for var in group):
|
264 |
+
return f"❌ Environment variable {name} already exists", None
|
265 |
+
|
266 |
+
# Add to custom environment variables group
|
267 |
+
ENV_GROUPS["Custom Environment Variables"].append(
|
268 |
+
{
|
269 |
+
"name": name,
|
270 |
+
"label": name,
|
271 |
+
"type": var_type,
|
272 |
+
"required": False,
|
273 |
+
"help": "User-defined environment variable",
|
274 |
+
}
|
275 |
+
)
|
276 |
+
|
277 |
+
# Save environment variables
|
278 |
+
env_vars = {name: value}
|
279 |
+
save_env_vars(env_vars)
|
280 |
+
|
281 |
+
# Return success message and updated environment variable group
|
282 |
+
return f"✅ Added environment variable {name}", ENV_GROUPS[
|
283 |
+
"Custom Environment Variables"
|
284 |
+
]
|
285 |
+
|
286 |
+
|
287 |
+
def update_custom_env_var(name, value, var_type):
|
288 |
+
"""Update custom environment variable"""
|
289 |
+
if not name:
|
290 |
+
return "❌ Environment variable name cannot be empty", None
|
291 |
+
|
292 |
+
# Check if the environment variable exists in the custom environment variables group
|
293 |
+
found = False
|
294 |
+
for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
|
295 |
+
if var["name"] == name:
|
296 |
+
# Update type
|
297 |
+
ENV_GROUPS["Custom Environment Variables"][i]["type"] = var_type
|
298 |
+
found = True
|
299 |
+
break
|
300 |
+
|
301 |
+
if not found:
|
302 |
+
return f"❌ Custom environment variable {name} does not exist", None
|
303 |
+
|
304 |
+
# Save environment variable value
|
305 |
+
env_vars = {name: value}
|
306 |
+
save_env_vars(env_vars)
|
307 |
+
|
308 |
+
# Return success message and updated environment variable group
|
309 |
+
return f"✅ Updated environment variable {name}", ENV_GROUPS[
|
310 |
+
"Custom Environment Variables"
|
311 |
+
]
|
312 |
+
|
313 |
+
|
314 |
+
def delete_custom_env_var(name):
|
315 |
+
"""Delete custom environment variable"""
|
316 |
+
if not name:
|
317 |
+
return "❌ Environment variable name cannot be empty", None
|
318 |
+
|
319 |
+
# Check if the environment variable exists in the custom environment variables group
|
320 |
+
found = False
|
321 |
+
for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
|
322 |
+
if var["name"] == name:
|
323 |
+
# Delete from custom environment variables group
|
324 |
+
del ENV_GROUPS["Custom Environment Variables"][i]
|
325 |
+
found = True
|
326 |
+
break
|
327 |
+
|
328 |
+
if not found:
|
329 |
+
return f"❌ Custom environment variable {name} does not exist", None
|
330 |
+
|
331 |
+
# Delete the environment variable from .env file
|
332 |
+
env_path = Path(".env")
|
333 |
+
if env_path.exists():
|
334 |
+
try:
|
335 |
+
with open(env_path, "r", encoding="utf-8") as f:
|
336 |
+
lines = f.readlines()
|
337 |
+
|
338 |
+
with open(env_path, "w", encoding="utf-8") as f:
|
339 |
+
for line in lines:
|
340 |
+
try:
|
341 |
+
# More precisely match environment variable lines
|
342 |
+
line_stripped = line.strip()
|
343 |
+
# Check if it's a comment line or empty line
|
344 |
+
if not line_stripped or line_stripped.startswith("#"):
|
345 |
+
f.write(line) # Keep comment lines and empty lines
|
346 |
+
continue
|
347 |
+
|
348 |
+
# Check if it contains an equals sign
|
349 |
+
if "=" not in line_stripped:
|
350 |
+
f.write(line) # Keep lines without equals sign
|
351 |
+
continue
|
352 |
+
|
353 |
+
# Extract variable name and check if it matches the variable to be deleted
|
354 |
+
var_name = line_stripped.split("=", 1)[0].strip()
|
355 |
+
if var_name != name:
|
356 |
+
f.write(line) # Keep variables that don't match
|
357 |
+
except Exception as e:
|
358 |
+
print(
|
359 |
+
f"Error processing .env file line: {line}, error: {str(e)}"
|
360 |
+
)
|
361 |
+
# Keep the original line when an error occurs
|
362 |
+
f.write(line)
|
363 |
+
except Exception as e:
|
364 |
+
print(f"Error deleting environment variable: {str(e)}")
|
365 |
+
return f"❌ Failed to delete environment variable: {str(e)}", None
|
366 |
+
|
367 |
+
# Delete from current process environment variables
|
368 |
+
if name in os.environ:
|
369 |
+
del os.environ[name]
|
370 |
+
|
371 |
+
# Return success message and updated environment variable group
|
372 |
+
return f"✅ Deleted environment variable {name}", ENV_GROUPS[
|
373 |
+
"Custom Environment Variables"
|
374 |
+
]
|
375 |
+
|
376 |
+
|
377 |
+
def terminate_process():
|
378 |
+
"""Terminate the currently running process"""
|
379 |
+
global current_process
|
380 |
+
|
381 |
+
with process_lock:
|
382 |
+
if current_process is not None and current_process.poll() is None:
|
383 |
+
try:
|
384 |
+
# On Windows, use taskkill to forcibly terminate the process tree
|
385 |
+
if os.name == "nt":
|
386 |
+
# Get process ID
|
387 |
+
pid = current_process.pid
|
388 |
+
# Use taskkill command to terminate the process and its children - avoid using shell=True for better security
|
389 |
+
try:
|
390 |
+
subprocess.run(
|
391 |
+
["taskkill", "/F", "/T", "/PID", str(pid)], check=False
|
392 |
+
)
|
393 |
+
except subprocess.SubprocessError as e:
|
394 |
+
log_queue.put(f"Error terminating process: {str(e)}\n")
|
395 |
+
return f"❌ Error terminating process: {str(e)}"
|
396 |
+
else:
|
397 |
+
# On Unix, use SIGTERM and SIGKILL
|
398 |
+
current_process.terminate()
|
399 |
+
try:
|
400 |
+
current_process.wait(timeout=3)
|
401 |
+
except subprocess.TimeoutExpired:
|
402 |
+
current_process.kill()
|
403 |
+
|
404 |
+
# Wait for process to terminate
|
405 |
+
try:
|
406 |
+
current_process.wait(timeout=2)
|
407 |
+
except subprocess.TimeoutExpired:
|
408 |
+
pass # Already tried to force terminate, ignore timeout
|
409 |
+
|
410 |
+
log_queue.put("Process terminated\n")
|
411 |
+
return "✅ Process terminated"
|
412 |
+
except Exception as e:
|
413 |
+
log_queue.put(f"Error terminating process: {str(e)}\n")
|
414 |
+
return f"❌ Error terminating process: {str(e)}"
|
415 |
+
else:
|
416 |
+
return "❌ No process is currently running"
|
417 |
+
|
418 |
+
|
419 |
+
def run_script(script_dropdown, question, progress=gr.Progress()):
|
420 |
+
"""Run the selected script and return the output"""
|
421 |
+
global current_process
|
422 |
+
|
423 |
+
script_name = SCRIPTS.get(script_dropdown)
|
424 |
+
if not script_name:
|
425 |
+
return "❌ Invalid script selection", "", "", "", None
|
426 |
+
|
427 |
+
if not question.strip():
|
428 |
+
return "Please enter a question!", "", "", "", None
|
429 |
+
|
430 |
+
# Clear the log queue
|
431 |
+
while not log_queue.empty():
|
432 |
+
log_queue.get()
|
433 |
+
|
434 |
+
# Create log directory
|
435 |
+
log_dir = Path("logs")
|
436 |
+
log_dir.mkdir(exist_ok=True)
|
437 |
+
|
438 |
+
# Create log file with timestamp
|
439 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
440 |
+
log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
|
441 |
+
|
442 |
+
# Build command
|
443 |
+
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
444 |
+
cmd = [
|
445 |
+
sys.executable,
|
446 |
+
os.path.join(base_path, "owl", "script_adapter.py"),
|
447 |
+
os.path.join(base_path, "owl", script_name),
|
448 |
+
]
|
449 |
+
|
450 |
+
# Create a copy of environment variables and add the question
|
451 |
+
env = os.environ.copy()
|
452 |
+
# Ensure question is a string type
|
453 |
+
if not isinstance(question, str):
|
454 |
+
question = str(question)
|
455 |
+
# Preserve newlines, but ensure it's a valid string
|
456 |
+
env["OWL_QUESTION"] = question
|
457 |
+
|
458 |
+
# Start the process
|
459 |
+
with process_lock:
|
460 |
+
current_process = subprocess.Popen(
|
461 |
+
cmd,
|
462 |
+
stdout=subprocess.PIPE,
|
463 |
+
stderr=subprocess.STDOUT,
|
464 |
+
text=True,
|
465 |
+
bufsize=1,
|
466 |
+
env=env,
|
467 |
+
encoding="utf-8",
|
468 |
+
)
|
469 |
+
|
470 |
+
# Create thread to read output
|
471 |
+
def read_output():
|
472 |
+
try:
|
473 |
+
# Use a unique timestamp to ensure log filename is not duplicated
|
474 |
+
timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
|
475 |
+
unique_log_file = (
|
476 |
+
log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
|
477 |
+
)
|
478 |
+
|
479 |
+
# Use this unique filename to write logs
|
480 |
+
with open(unique_log_file, "w", encoding="utf-8") as f:
|
481 |
+
# Update global log file path
|
482 |
+
nonlocal log_file
|
483 |
+
log_file = unique_log_file
|
484 |
+
|
485 |
+
for line in iter(current_process.stdout.readline, ""):
|
486 |
+
if line:
|
487 |
+
# Write to log file
|
488 |
+
f.write(line)
|
489 |
+
f.flush()
|
490 |
+
# Add to queue
|
491 |
+
log_queue.put(line)
|
492 |
+
except Exception as e:
|
493 |
+
log_queue.put(f"Error reading output: {str(e)}\n")
|
494 |
+
|
495 |
+
# Start the reading thread
|
496 |
+
threading.Thread(target=read_output, daemon=True).start()
|
497 |
+
|
498 |
+
# Collect logs
|
499 |
+
logs = []
|
500 |
+
progress(0, desc="Running...")
|
501 |
+
|
502 |
+
# Wait for process to complete or timeout
|
503 |
+
start_time = time.time()
|
504 |
+
timeout = 1800 # 30 minutes timeout
|
505 |
+
|
506 |
+
while current_process.poll() is None:
|
507 |
+
# Check if timeout
|
508 |
+
if time.time() - start_time > timeout:
|
509 |
+
with process_lock:
|
510 |
+
if current_process.poll() is None:
|
511 |
+
if os.name == "nt":
|
512 |
+
current_process.send_signal(signal.CTRL_BREAK_EVENT)
|
513 |
+
else:
|
514 |
+
current_process.terminate()
|
515 |
+
log_queue.put("Execution timeout, process terminated\n")
|
516 |
+
break
|
517 |
+
|
518 |
+
# Get logs from queue
|
519 |
+
while not log_queue.empty():
|
520 |
+
log = log_queue.get()
|
521 |
+
logs.append(log)
|
522 |
+
|
523 |
+
# Update progress
|
524 |
+
elapsed = time.time() - start_time
|
525 |
+
progress(min(elapsed / 300, 0.99), desc="Running...")
|
526 |
+
|
527 |
+
# Short sleep to reduce CPU usage
|
528 |
+
time.sleep(0.1)
|
529 |
+
|
530 |
+
# Update log display once per second
|
531 |
+
yield (
|
532 |
+
status_message(current_process),
|
533 |
+
extract_answer(logs),
|
534 |
+
"".join(logs),
|
535 |
+
str(log_file),
|
536 |
+
None,
|
537 |
+
)
|
538 |
+
|
539 |
+
# Get remaining logs
|
540 |
+
while not log_queue.empty():
|
541 |
+
logs.append(log_queue.get())
|
542 |
+
|
543 |
+
# Extract chat history (if any)
|
544 |
+
chat_history = extract_chat_history(logs)
|
545 |
+
|
546 |
+
# Return final status and logs
|
547 |
+
return (
|
548 |
+
status_message(current_process),
|
549 |
+
extract_answer(logs),
|
550 |
+
"".join(logs),
|
551 |
+
str(log_file),
|
552 |
+
chat_history,
|
553 |
+
)
|
554 |
+
|
555 |
+
|
556 |
+
def status_message(process):
|
557 |
+
"""Return status message based on process status"""
|
558 |
+
if process.poll() is None:
|
559 |
+
return "⏳ Running..."
|
560 |
+
elif process.returncode == 0:
|
561 |
+
return "✅ Execution successful"
|
562 |
+
else:
|
563 |
+
return f"❌ Execution failed (return code: {process.returncode})"
|
564 |
+
|
565 |
+
|
566 |
+
def extract_answer(logs):
|
567 |
+
"""Extract answer from logs"""
|
568 |
+
answer = ""
|
569 |
+
for log in logs:
|
570 |
+
if "Answer:" in log:
|
571 |
+
answer = log.split("Answer:", 1)[1].strip()
|
572 |
+
break
|
573 |
+
return answer
|
574 |
+
|
575 |
+
|
576 |
+
def extract_chat_history(logs):
|
577 |
+
"""Try to extract chat history from logs"""
|
578 |
+
try:
|
579 |
+
chat_json_str = ""
|
580 |
+
capture_json = False
|
581 |
+
|
582 |
+
for log in logs:
|
583 |
+
if "chat_history" in log:
|
584 |
+
# Start capturing JSON
|
585 |
+
start_idx = log.find("[")
|
586 |
+
if start_idx != -1:
|
587 |
+
capture_json = True
|
588 |
+
chat_json_str = log[start_idx:]
|
589 |
+
elif capture_json:
|
590 |
+
# Continue capturing JSON until finding the matching closing bracket
|
591 |
+
chat_json_str += log
|
592 |
+
if "]" in log:
|
593 |
+
# Found closing bracket, try to parse JSON
|
594 |
+
end_idx = chat_json_str.rfind("]") + 1
|
595 |
+
if end_idx > 0:
|
596 |
+
try:
|
597 |
+
# Clean up possible extra text
|
598 |
+
json_str = chat_json_str[:end_idx].strip()
|
599 |
+
chat_data = json.loads(json_str)
|
600 |
+
|
601 |
+
# Format for use with Gradio chat component
|
602 |
+
formatted_chat = []
|
603 |
+
for msg in chat_data:
|
604 |
+
if "role" in msg and "content" in msg:
|
605 |
+
role = (
|
606 |
+
"User" if msg["role"] == "user" else "Assistant"
|
607 |
+
)
|
608 |
+
formatted_chat.append([role, msg["content"]])
|
609 |
+
return formatted_chat
|
610 |
+
except json.JSONDecodeError:
|
611 |
+
# If parsing fails, continue capturing
|
612 |
+
pass
|
613 |
+
except Exception:
|
614 |
+
# Other errors, stop capturing
|
615 |
+
capture_json = False
|
616 |
+
except Exception:
|
617 |
+
pass
|
618 |
+
return None
|
619 |
+
|
620 |
+
|
621 |
+
def create_ui():
|
622 |
+
"""Create Gradio interface"""
|
623 |
+
# Load environment variables
|
624 |
+
env_vars = load_env_vars()
|
625 |
+
|
626 |
+
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
|
627 |
+
gr.Markdown(
|
628 |
+
"""
|
629 |
+
# 🦉 OWL Intelligent Assistant Platform
|
630 |
+
|
631 |
+
Select a model and enter your question, the system will run the corresponding script and display the results.
|
632 |
+
"""
|
633 |
+
)
|
634 |
+
|
635 |
+
with gr.Tabs():
|
636 |
+
with gr.TabItem("Run Mode"):
|
637 |
+
with gr.Row():
|
638 |
+
with gr.Column(scale=1):
|
639 |
+
# Ensure default value is a key that exists in SCRIPTS
|
640 |
+
default_script = list(SCRIPTS.keys())[0] if SCRIPTS else None
|
641 |
+
script_dropdown = gr.Dropdown(
|
642 |
+
choices=list(SCRIPTS.keys()),
|
643 |
+
value=default_script,
|
644 |
+
label="Select Mode",
|
645 |
+
)
|
646 |
+
|
647 |
+
script_info = gr.Textbox(
|
648 |
+
value=get_script_info(default_script)
|
649 |
+
if default_script
|
650 |
+
else "",
|
651 |
+
label="Model Description",
|
652 |
+
interactive=False,
|
653 |
+
)
|
654 |
+
|
655 |
+
script_dropdown.change(
|
656 |
+
fn=lambda x: get_script_info(x),
|
657 |
+
inputs=script_dropdown,
|
658 |
+
outputs=script_info,
|
659 |
+
)
|
660 |
+
|
661 |
+
question_input = gr.Textbox(
|
662 |
+
lines=8,
|
663 |
+
placeholder="Please enter your question...",
|
664 |
+
label="Question",
|
665 |
+
elem_id="question_input",
|
666 |
+
show_copy_button=True,
|
667 |
+
)
|
668 |
+
|
669 |
+
gr.Markdown(
|
670 |
+
"""
|
671 |
+
                            > **Note**: Your question will replace the default question in the script. The system will automatically handle the replacement, ensuring your question is used correctly.
                            > Multi-line input is supported, line breaks will be preserved.
                            """
                        )

                        with gr.Row():
                            run_button = gr.Button("Run", variant="primary")
                            stop_button = gr.Button("Stop", variant="stop")

                    with gr.Column(scale=2):
                        with gr.Tabs():
                            with gr.TabItem("Results"):
                                status_output = gr.Textbox(label="Status")
                                answer_output = gr.Textbox(label="Answer", lines=10)
                                log_file_output = gr.Textbox(label="Log File Path")

                            with gr.TabItem("Run Logs"):
                                log_output = gr.Textbox(label="Complete Logs", lines=25)

                            with gr.TabItem("Chat History"):
                                chat_output = gr.Chatbot(label="Conversation History")

                # Example questions
                examples = [
                    [
                        "Qwen Mini (Chinese)",
                        "Browse Amazon and find a product that is attractive to programmers. Please provide the product name and price.",
                    ],
                    [
                        "DeepSeek (Chinese)",
                        "Please analyze the latest statistics of the CAMEL-AI project on GitHub. Find out the number of stars, number of contributors, and recent activity of the project. Then, create a simple Excel spreadsheet to display this data and generate a bar chart to visualize these metrics. Finally, summarize the popularity and development trends of the CAMEL project.",
                    ],
                    [
                        "Default",
                        "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer.",
                    ],
                ]

                gr.Examples(examples=examples, inputs=[script_dropdown, question_input])

            with gr.TabItem("Environment Variable Configuration"):
                env_inputs = {}
                save_status = gr.Textbox(label="Save Status", interactive=False)

                # Add custom environment variables section
                with gr.Accordion("Add Custom Environment Variables", open=True):
                    with gr.Row():
                        new_var_name = gr.Textbox(
                            label="Environment Variable Name",
                            placeholder="Example: MY_CUSTOM_API_KEY",
                        )
                        new_var_value = gr.Textbox(
                            label="Environment Variable Value",
                            placeholder="Enter value",
                        )
                        new_var_type = gr.Dropdown(
                            choices=["text", "password"], value="text", label="Type"
                        )

                    add_var_button = gr.Button(
                        "Add Environment Variable", variant="primary"
                    )
                    add_var_status = gr.Textbox(label="Add Status", interactive=False)

                    # Custom environment variables list
                    custom_vars_list = gr.JSON(
                        value=ENV_GROUPS["Custom Environment Variables"],
                        label="Added Custom Environment Variables",
                        visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
                    )

                # Update and delete custom environment variables section
                with gr.Accordion(
                    "Update or Delete Custom Environment Variables",
                    open=True,
                    visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
                ) as update_delete_accordion:
                    with gr.Row():
                        # Create dropdown menu to display all custom environment variables
                        custom_var_dropdown = gr.Dropdown(
                            choices=[
                                var["name"]
                                for var in ENV_GROUPS["Custom Environment Variables"]
                            ],
                            label="Select Environment Variable",
                            interactive=True,
                        )
                        update_var_value = gr.Textbox(
                            label="New Environment Variable Value",
                            placeholder="Enter new value",
                        )
                        update_var_type = gr.Dropdown(
                            choices=["text", "password"], value="text", label="Type"
                        )

                    with gr.Row():
                        update_var_button = gr.Button(
                            "Update Environment Variable", variant="primary"
                        )
                        delete_var_button = gr.Button(
                            "Delete Environment Variable", variant="stop"
                        )

                    update_var_status = gr.Textbox(
                        label="Operation Status", interactive=False
                    )

                # Add environment variable button click event
                add_var_button.click(
                    fn=add_custom_env_var,
                    inputs=[new_var_name, new_var_value, new_var_type],
                    outputs=[add_var_status, custom_vars_list],
                ).then(
                    fn=lambda vars: {"visible": len(vars) > 0},
                    inputs=[custom_vars_list],
                    outputs=[update_delete_accordion],
                )

                # Update environment variable button click event
                update_var_button.click(
                    fn=update_custom_env_var,
                    inputs=[custom_var_dropdown, update_var_value, update_var_type],
                    outputs=[update_var_status, custom_vars_list],
                )

                # Delete environment variable button click event
                delete_var_button.click(
                    fn=delete_custom_env_var,
                    inputs=[custom_var_dropdown],
                    outputs=[update_var_status, custom_vars_list],
                ).then(
                    fn=lambda vars: {"visible": len(vars) > 0},
                    inputs=[custom_vars_list],
                    outputs=[update_delete_accordion],
                )

                # When custom environment variables list is updated, update dropdown menu options
                custom_vars_list.change(
                    fn=lambda vars: {
                        "choices": [var["name"] for var in vars],
                        "value": None,
                    },
                    inputs=[custom_vars_list],
                    outputs=[custom_var_dropdown],
                )

                # Existing environment variable configuration
                for group_name, vars in ENV_GROUPS.items():
                    if (
                        group_name != "Custom Environment Variables" or len(vars) > 0
                    ):  # Only show non-empty custom environment variable groups
                        with gr.Accordion(
                            group_name,
                            open=(group_name != "Custom Environment Variables"),
                        ):
                            for var in vars:
                                # Add help information
                                gr.Markdown(f"**{var['help']}**")

                                if var["type"] == "password":
                                    env_inputs[var["name"]] = gr.Textbox(
                                        value=env_vars.get(var["name"], ""),
                                        label=var["label"],
                                        placeholder=f"Please enter {var['label']}",
                                        type="password",
                                    )
                                else:
                                    env_inputs[var["name"]] = gr.Textbox(
                                        value=env_vars.get(var["name"], ""),
                                        label=var["label"],
                                        placeholder=f"Please enter {var['label']}",
                                    )

                save_button = gr.Button("Save Environment Variables", variant="primary")

                # Save environment variables
                save_inputs = [
                    env_inputs[var_name]
                    for group in ENV_GROUPS.values()
                    for var in group
                    for var_name in [var["name"]]
                    if var_name in env_inputs
                ]
                save_button.click(
                    fn=lambda *values: save_env_vars(
                        dict(
                            zip(
                                [
                                    var["name"]
                                    for group in ENV_GROUPS.values()
                                    for var in group
                                    if var["name"] in env_inputs
                                ],
                                values,
                            )
                        )
                    ),
                    inputs=save_inputs,
                    outputs=save_status,
                )

        # Run script
        run_button.click(
            fn=run_script,
            inputs=[script_dropdown, question_input],
            outputs=[
                status_output,
                answer_output,
                log_output,
                log_file_output,
                chat_output,
            ],
            show_progress=True,
        )

        # Terminate execution
        stop_button.click(fn=terminate_process, inputs=[], outputs=[status_output])

        # Add footer
        gr.Markdown(
            """
            ### 📝 Instructions

            - Select a model and enter your question
            - Click the "Run" button to start execution
            - To stop execution, click the "Stop" button
            - View execution status and answers in the "Results" tab
            - View complete logs in the "Run Logs" tab
            - View conversation history in the "Chat History" tab (if available)
            - Configure API keys and other environment variables in the "Environment Variable Configuration" tab
            - You can add custom environment variables to meet special requirements

            ### ⚠️ Notes

            - Running some models may require API keys, please make sure you have set the corresponding environment variables in the "Environment Variable Configuration" tab
            - Some scripts may take a long time to run, please be patient
            - If execution exceeds 30 minutes, the process will automatically terminate
            - Your question will replace the default question in the script, ensure the question is compatible with the selected model
            """
        )

    return app


if __name__ == "__main__":
    # Create and launch the application
    app = create_ui()
    app.queue().launch(share=True)
owl/run.py
CHANGED
@@ -21,6 +21,7 @@ from camel.toolkits import (
    SearchToolkit,
    VideoAnalysisToolkit,
    WebToolkit,
+    FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -97,6 +98,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
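The same one-line pattern recurs throughout the scripts below: import `FileWriteToolkit` and splice its tools into the assistant's tool list. As a reference, here is a minimal standalone sketch of that wiring (assuming camel-ai 0.2.27 as pinned later in this PR; the surrounding variable names are illustrative and not taken from the repo):

```python
# Minimal sketch (not from the repo): wire FileWriteToolkit into an agent's tool list,
# mirroring the one-line additions in the hunks above and below.
# Assumes camel-ai==0.2.27, where every toolkit exposes get_tools().
from camel.toolkits import FileWriteToolkit, SearchToolkit

# Files written by the agent land in output_dir.
file_write_tools = FileWriteToolkit(output_dir="./").get_tools()

tools = [
    SearchToolkit().search_wiki,   # an existing tool, as in the scripts
    *file_write_tools,             # newly added file-writing tools
]

print(f"{len(tools)} tools configured for the assistant agent")
```

Because `get_tools()` returns a flat list of function tools, the `*` splice keeps the assistant's tool list flat, which is why each script only needs the single added line.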
owl/run_deepseek_zh.py
CHANGED
@@ -23,9 +23,10 @@ from dotenv import load_dotenv

from camel.models import ModelFactory
from camel.toolkits import (
-    CodeExecutionToolkit,
    ExcelToolkit,
    SearchToolkit,
+    FileWriteToolkit,
+    CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType

@@ -61,31 +62,6 @@ def construct_society(question: str) -> OwlRolePlaying:
            model_type=ModelType.DEEPSEEK_CHAT,
            model_config_dict={"temperature": 0},
        ),
-        "web": ModelFactory.create(
-            model_platform=ModelPlatformType.DEEPSEEK,
-            model_type=ModelType.DEEPSEEK_CHAT,
-            model_config_dict={"temperature": 0},
-        ),
-        "planning": ModelFactory.create(
-            model_platform=ModelPlatformType.DEEPSEEK,
-            model_type=ModelType.DEEPSEEK_CHAT,
-            model_config_dict={"temperature": 0},
-        ),
-        "video": ModelFactory.create(
-            model_platform=ModelPlatformType.DEEPSEEK,
-            model_type=ModelType.DEEPSEEK_CHAT,
-            model_config_dict={"temperature": 0},
-        ),
-        "image": ModelFactory.create(
-            model_platform=ModelPlatformType.DEEPSEEK,
-            model_type=ModelType.DEEPSEEK_CHAT,
-            model_config_dict={"temperature": 0},
-        ),
-        "document": ModelFactory.create(
-            model_platform=ModelPlatformType.DEEPSEEK,
-            model_type=ModelType.DEEPSEEK_CHAT,
-            model_config_dict={"temperature": 0},
-        ),
    }

    # Configure toolkits
@@ -94,7 +70,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
-        *
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
@@ -124,9 +100,7 @@ def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = (
-        "
-        "贡献者数量和最近的活跃度。然后,创建一个简单的Excel表格来展示这些数据,"
-        "并生成一个柱状图来可视化这些指标。最后,总结CAMEL项目的受欢迎程度和发展趋势。"
+        "搜索OWL项目最近的新闻并生成一篇报告,最后保存到本地。"
    )

    # Construct and run the society
owl/run_gaia_roleplaying.py
CHANGED
@@ -27,6 +27,7 @@ from camel.toolkits import (
    SearchToolkit,
    VideoAnalysisToolkit,
    WebToolkit,
+    FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.configs import ChatGPTConfig
@@ -51,6 +52,8 @@ def main():
    # Create cache directory
    cache_dir = "tmp/"
    os.makedirs(cache_dir, exist_ok=True)
+    result_dir = "results/"
+    os.makedirs(result_dir, exist_ok=True)

    # Create models for different components
    models = {
@@ -101,6 +104,7 @@ def main():
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *SearchToolkit().get_tools(),
        *ExcelToolkit().get_tools(),
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
@@ -127,8 +131,8 @@ def main():
    )

    # Output results
-    logger.
-    logger.
+    logger.info(f"Correct: {result['correct']}, Total: {result['total']}")
+    logger.info(f"Accuracy: {result['accuracy']}")


if __name__ == "__main__":
owl/run_mini.py
CHANGED
@@ -17,6 +17,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
    SearchToolkit,
    WebToolkit,
+    FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -71,6 +72,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
owl/run_ollama.py
ADDED
@@ -0,0 +1,133 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# run_ollama.py by tj-scripts(https://github.com/tj-scripts)

from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
    CodeExecutionToolkit,
    ExcelToolkit,
    ImageAnalysisToolkit,
    SearchToolkit,
    WebToolkit,
    FileWriteToolkit,
)
from camel.types import ModelPlatformType

from utils import OwlRolePlaying, run_society

from camel.logger import set_log_level

set_log_level(level="DEBUG")

load_dotenv()


def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.8, "max_tokens": 1000000},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.2, "max_tokens": 1000000},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="llava:latest",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="llava:latest",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
    }

    # Configure toolkits
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        # SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()
owl/run_openai_compatiable_model.py
CHANGED
@@ -21,6 +21,7 @@ from camel.toolkits import (
    ImageAnalysisToolkit,
    SearchToolkit,
    WebToolkit,
+    FileWriteToolkit,
)
from camel.types import ModelPlatformType

@@ -95,6 +96,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
owl/run_qwen_mini_zh.py
CHANGED
@@ -19,7 +19,7 @@
from dotenv import load_dotenv

from camel.models import ModelFactory
-from camel.toolkits import WebToolkit, SearchToolkit
+from camel.toolkits import WebToolkit, SearchToolkit, FileWriteToolkit
from camel.types import ModelPlatformType, ModelType

from utils import OwlRolePlaying, run_society
@@ -39,19 +39,19 @@ def construct_society(question: str) -> OwlRolePlaying:

    user_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
-        model_type=ModelType.
+        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

    assistant_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
-        model_type=ModelType.
+        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

    planning_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
-        model_type=ModelType.
+        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

@@ -69,6 +69,7 @@ def construct_society(question: str) -> OwlRolePlaying:
            output_language="Chinese",
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    user_role_name = "user"
owl/run_qwen_zh.py
CHANGED
@@ -25,6 +25,7 @@ from camel.toolkits import (
    SearchToolkit,
    VideoAnalysisToolkit,
    WebToolkit,
+    FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType

@@ -52,12 +53,12 @@ def construct_society(question: str) -> OwlRolePlaying:
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
-            model_type=ModelType.
+            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
-            model_type=ModelType.
+            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
@@ -67,7 +68,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
-            model_type=ModelType.
+            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "video": ModelFactory.create(
@@ -103,6 +104,7 @@ def construct_society(question: str) -> OwlRolePlaying:
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
+        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
owl/run_terminal.py
ADDED
@@ -0,0 +1,120 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
import os
from camel.models import ModelFactory
from camel.toolkits import (
    SearchToolkit,
    WebToolkit,
    FileWriteToolkit,
    TerminalToolkit
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level

from utils import OwlRolePlaying, run_society

load_dotenv()
set_log_level(level="DEBUG")
# Get current script directory
base_dir = os.path.dirname(os.path.abspath(__file__))

def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }

    # Configure toolkits
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai,
    and write the numbers into a Python file using the plot package,
    save it to "+{os.path.join(base_dir, 'final_output')}+",
    and execute the Python file with the local terminal to display the graph for me."""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m")


if __name__ == "__main__":
    main()
owl/run_terminal_zh.py
ADDED
@@ -0,0 +1,119 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv

from camel.models import ModelFactory
from camel.toolkits import (
    SearchToolkit,
    WebToolkit,
    FileWriteToolkit,
    TerminalToolkit
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level

from utils import OwlRolePlaying, run_society

load_dotenv()
set_log_level(level="DEBUG")
import os
# Get current script directory
base_dir = os.path.dirname(os.path.abspath(__file__))

def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }

    # Configure toolkits
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"+{os.path.join
    (base_dir, 'final_output')}+",用本地终端执行python文件显示图出来给我"""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m")


if __name__ == "__main__":
    main()
owl/script_adapter.py
CHANGED
@@ -68,7 +68,11 @@ def run_script_with_env_question(script_name):

    # 转义问题中的特殊字符
    escaped_question = (
-        question.replace("\\", "\\\\")
+        question.replace("\\", "\\\\")
+        .replace('"', '\\"')
+        .replace("'", "\\'")
+        .replace("\n", "\\n")  # 转义换行符
+        .replace("\r", "\\r")  # 转义回车符
    )

    # 查找脚本中所有的question赋值 - 改进的正则表达式
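For clarity, here is a small sketch of how the extended escaping chain behaves on a multi-line question (the chain itself is copied from the hunk above; the sample input string is made up):

```python
# Minimal sketch of the escaping chain added above (the sample question is hypothetical).
question = 'Find "cheap" GPUs\nunder $500'

escaped_question = (
    question.replace("\\", "\\\\")
    .replace('"', '\\"')
    .replace("'", "\\'")
    .replace("\n", "\\n")  # escape newlines
    .replace("\r", "\\r")  # escape carriage returns
)

# The escaped text can now be substituted into a script's question = "..."
# assignment without breaking the string literal.
print(escaped_question)  # Find \"cheap\" GPUs\nunder $500
```

Escaping newlines and carriage returns is what lets the multi-line questions accepted by the web UI be substituted into a target script's `question` assignment without producing a syntax error.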
owl/utils/enhanced_role_playing.py
CHANGED
@@ -193,7 +193,7 @@ Please note that our overall task may be very complicated. Here are some tips that may help you solve the task:
- When trying to solve math problems, you can try to write python code and use sympy library to solve the problem.
- Always verify the accuracy of your final answers! Try cross-checking the answers by other ways. (e.g., screenshots, webpage analysis, etc.).
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
-- After writing codes, do not forget to run the code and get the result. If it encounters an error, try to debug it.
+- After writing codes, do not forget to run the code and get the result. If it encounters an error, try to debug it. Also, bear in mind that the code execution environment does not support interactive input.
- When a tool fails to run, or the code does not run correctly, never assume that it returns the correct result and continue to reason based on the assumption, because the assumed result cannot lead you to the correct answer. The right way is to think about the reason for the error and try again.
- Search results typically do not provide precise answers. It is not likely to find the answer directly using search toolkit only, the search query should be concise and focuses on finding sources rather than direct answers, as it always need to use other tools to further process the url, e.g. interact with the webpage, extract webpage content, etc.
- For downloading files, you can either use the web browser simulation toolkit or write codes.
@@ -369,11 +369,6 @@ class OwlGAIARolePlaying(OwlRolePlaying):
            ),
        )
        user_msg = self._reduce_message_options(user_response.msgs)
-        if (
-            "n" in self.user_agent.model_config_dict.keys()
-            and self.user_agent.model_config_dict["n"] > 1
-        ):
-            self.user_agent.record_message(user_msg)

        modified_user_msg = deepcopy(user_msg)
owl/utils/gaia.py
CHANGED
@@ -195,7 +195,7 @@ class GAIABenchmark(BaseBenchmark):
        # Process tasks
        for task in tqdm(datas, desc="Running"):
            if self._check_task_completed(task["task_id"]):
-                logger.
+                logger.info(
                    f"The following task is already completed:\n task id: {task['task_id']}, question: {task['Question']}"
                )
                continue
pyproject.toml
CHANGED
@@ -21,7 +21,7 @@ keywords = [
    "learning-systems"
]
dependencies = [
-    "camel-ai[all]==0.2.
+    "camel-ai[all]==0.2.27",
    "chunkr-ai>=0.0.41",
    "docx2markdown>=0.1.1",
    "gradio>=3.50.2",
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-camel-ai[all]==0.2.
+camel-ai[all]==0.2.27
chunkr-ai>=0.0.41
docx2markdown>=0.1.1
gradio>=3.50.2
run_app.py
CHANGED
@@ -15,40 +15,43 @@
# -*- coding: utf-8 -*-

"""
-OWL
+OWL Intelligent Assistant Platform Launch Script
"""

import os
import sys
from pathlib import Path

+os.environ['PYTHONIOENCODING'] = 'utf-8'

def main():
-    """
-    #
+    """Main function to launch the OWL Intelligent Assistant Platform"""
+    # Ensure the current directory is the project root
    project_root = Path(__file__).resolve().parent
    os.chdir(project_root)

-    #
+    # Create log directory
    log_dir = project_root / "logs"
    log_dir.mkdir(exist_ok=True)

-    #
+    # Add project root to Python path
    sys.path.insert(0, str(project_root))

    try:
-        from owl.
+        from owl.app_en import create_ui

-        #
+        # Create and launch the application
        app = create_ui()
        app.queue().launch(share=False)

    except ImportError as e:
-        print(
-
+        print(
+            f"Error: Unable to import necessary modules. Please ensure all dependencies are installed: {e}"
+        )
+        print("Tip: Run 'pip install -r requirements.txt' to install all dependencies")
        sys.exit(1)
    except Exception as e:
-        print(f"
+        print(f"Error occurred while starting the application: {e}")
        import traceback

        traceback.print_exc()
run_app_zh.py
ADDED
@@ -0,0 +1,60 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
OWL 智能助手运行平台启动脚本
"""

import os
import sys
from pathlib import Path

os.environ['PYTHONIOENCODING'] = 'utf-8'

def main():
    """主函数,启动OWL智能助手运行平台"""
    # 确保当前目录是项目根目录
    project_root = Path(__file__).resolve().parent
    os.chdir(project_root)

    # 创建日志目录
    log_dir = project_root / "logs"
    log_dir.mkdir(exist_ok=True)

    # 导入并运行应用
    sys.path.insert(0, str(project_root))

    try:
        from owl.app import create_ui

        # 创建并启动应用
        app = create_ui()
        app.queue().launch(share=False)

    except ImportError as e:
        print(f"错误: 无法导入必要的模块。请确保已安装所有依赖项: {e}")
        print("提示: 运行 'pip install -r requirements.txt' 安装所有依赖项")
        sys.exit(1)
    except Exception as e:
        print(f"启动应用程序时出错: {e}")
        import traceback

        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()
uv.lock
CHANGED
@@ -204,7 +204,7 @@ wheels = [

[[package]]
name = "anthropic"
-version = "0.
+version = "0.49.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "anyio" },
@@ -215,9 +215,9 @@ dependencies = [
    { name = "sniffio" },
    { name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/86/e3/a88c8494ce4d1a88252b9e053607e885f9b14d0a32273d47b727cbee4228/anthropic-0.49.0.tar.gz", hash = "sha256:c09e885b0f674b9119b4f296d8508907f6cff0009bc20d5cf6b35936c40b4398", size = 210016 }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/76/74/5d90ad14d55fbe3f9c474fdcb6e34b4bed99e3be8efac98734a5ddce88c1/anthropic-0.49.0-py3-none-any.whl", hash = "sha256:bbc17ad4e7094988d2fa86b87753ded8dce12498f4b85fe5810f208f454a8375", size = 243368 },
]

[[package]]
@@ -482,7 +482,7 @@ wheels = [

[[package]]
name = "camel-ai"
-version = "0.2.
+version = "0.2.27"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "colorama" },
@@ -499,9 +499,9 @@ dependencies = [
    { name = "pyyaml" },
    { name = "tiktoken" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/ff/27/2bce666ae7f7d0db276d037b3afe84a460e782438e5cacc08de20417233b/camel_ai-0.2.27.tar.gz", hash = "sha256:4689245ad48f51e5e602d2651cf463afe212bcf046633a19c2189574c1f3481a", size = 441363 }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/b0/fa/94f5b41cb6babc81aac00494b170ec2bea058b6c00f477ceb3e886c49177/camel_ai-0.2.27-py3-none-any.whl", hash = "sha256:c4a6597791faf2f2161c56c2579e60850557b126135b29af77ebd08fa0774e0b", size = 746387 },
]

[package.optional-dependencies]
@@ -524,6 +524,7 @@ all = [
    { name = "diffusers" },
    { name = "discord-py" },
    { name = "docker" },
+    { name = "docx" },
    { name = "docx2txt" },
    { name = "duckduckgo-search" },
    { name = "e2b-code-interpreter" },
@@ -531,6 +532,7 @@ all = [
    { name = "ffmpeg-python" },
    { name = "firecrawl-py" },
    { name = "fish-audio-sdk" },
+    { name = "fpdf" },
    { name = "google-cloud-storage" },
    { name = "googlemaps" },
    { name = "gradio" },
@@ -540,6 +542,7 @@ all = [
    { name = "jupyter-client" },
    { name = "linkup-sdk" },
    { name = "litellm" },
+    { name = "mcp" },
    { name = "mistralai" },
    { name = "mock" },
    { name = "mypy" },
@@ -592,6 +595,7 @@ all = [
    { name = "transformers" },
    { name = "tree-sitter" },
    { name = "tree-sitter-python" },
+    { name = "typer" },
    { name = "types-colorama" },
    { name = "types-mock" },
    { name = "types-pyyaml" },
@@ -1213,6 +1217,16 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 },
]

+[[package]]
+name = "docx"
+version = "0.2.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "lxml" },
+    { name = "pillow" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4a/8e/5a01644697b03016de339ef444cfff28367f92984dc74eddaab1ed60eada/docx-0.2.4.tar.gz", hash = "sha256:9d7595eac6e86cda0b7136a2995318d039c1f3eaa368a3300805abbbe5dc8877", size = 54925 }
+
[[package]]
name = "docx2markdown"
version = "0.1.1"
@@ -1540,6 +1554,12 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/bf/ff/44934a031ce5a39125415eb405b9efb76fe7f9586b75291d66ae5cbfc4e6/fonttools-4.56.0-py3-none-any.whl", hash = "sha256:1088182f68c303b50ca4dc0c82d42083d176cba37af1937e1a976a31149d4d14", size = 1089800 },
]

+[[package]]
+name = "fpdf"
+version = "1.7.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/37/c6/608a9e6c172bf9124aa687ec8b9f0e8e5d697d59a5f4fad0e2d5ec2a7556/fpdf-1.7.2.tar.gz", hash = "sha256:125840783289e7d12552b1e86ab692c37322e7a65b96a99e0ea86cca041b6779", size = 39504 }
+
[[package]]
name = "free-proxy"
version = "1.1.3"
@@ -2653,6 +2673,18 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/83/29/00b9b0322a473aee6cda87473401c9abb19506cd650cc69a8aa38277ea74/lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499", size = 3487718 },
]

+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "mdurl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
+]
+
[[package]]
name = "markupsafe"
version = "2.1.5"
@@ -2755,6 +2787,34 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 },
]

+[[package]]
+name = "mcp"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anyio" },
+    { name = "httpx" },
+    { name = "httpx-sse" },
+    { name = "pydantic" },
+    { name = "pydantic-settings" },
+    { name = "sse-starlette" },
+    { name = "starlette" },
+    { name = "uvicorn" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6b/b6/81e5f2490290351fc97bf46c24ff935128cb7d34d68e3987b522f26f7ada/mcp-1.3.0.tar.gz", hash = "sha256:f409ae4482ce9d53e7ac03f3f7808bcab735bdfc0fba937453782efb43882d45", size = 150235 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d0/d2/a9e87b506b2094f5aa9becc1af5178842701b27217fa43877353da2577e3/mcp-1.3.0-py3-none-any.whl", hash = "sha256:2829d67ce339a249f803f22eba5e90385eafcac45c94b00cab6cef7e8f217211", size = 70672 },
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
+]
+
[[package]]
name = "milvus-lite"
version = "2.4.11"
@@ -3515,7 +3575,7 @@ dependencies = [

[package.metadata]
requires-dist = [
-    { name = "camel-ai", extras = ["all"], specifier = "==0.2.
+    { name = "camel-ai", extras = ["all"], specifier = "==0.2.27" },
    { name = "chunkr-ai", specifier = ">=0.0.41" },
    { name = "docx2markdown", specifier = ">=0.1.1" },
    { name = "gradio", specifier = ">=3.50.2" },
@@ -4097,6 +4157,19 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a9/f9/b6bcaf874f410564a78908739c80861a171788ef4d4f76f5009656672dfe/pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753", size = 1920344 },
]

[[package]]
name = "pydub"
version = "0.25.1"
@@ -4184,6 +4257,7 @@
wheels = [
    { url = "https://files.pythonhosted.org/packages/61/9b/98ef4b98309e9db3baa9fe572f0e61b6130bb9852d13189970f35b703499/pymupdf-1.25.3-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:96878e1b748f9c2011aecb2028c5f96b5a347a9a91169130ad0133053d97915e", size = 19343576 },
    { url = "https://files.pythonhosted.org/packages/14/62/4e12126db174c8cfbf692281cda971cc4046c5f5226032c2cfaa6f83e08d/pymupdf-1.25.3-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:6ef753005b72ebfd23470f72f7e30f61e21b0b5e748045ec5b8f89e6e3068d62", size = 18580114 },
    { url = "https://files.pythonhosted.org/packages/52/de/bd1418e31f73d37b8381cd5deacfd681e6be702b8890e123e83724569ee1/pymupdf-1.25.3-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46d90c4f9e62d1856e8db4b9f04a202ff4a7f086a816af73abdc86adb7f5e25a", size = 19999825 },
    { url = "https://files.pythonhosted.org/packages/42/ee/3c449b0de061440ba1ac984aa845315e9e2dca0ff2003c5adfc6febff203/pymupdf-1.25.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a5de51efdbe4d486b6c1111c84e8a231cbfb426f3d6ff31ab530ad70e6f39756", size = 21123157 },
    { url = "https://files.pythonhosted.org/packages/83/53/71faaaf91c56f2883b13f3dd849bf2697f012eb35eb7b952d62734cff41f/pymupdf-1.25.3-cp39-abi3-win32.whl", hash = "sha256:bca72e6089f985d800596e22973f79cc08af6cbff1d93e5bda9248326a03857c", size = 15094211 },
@@ -4766,6 +4840,20 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490 },
]

[[package]]
name = "roman-numerals-py"
version = "3.1.0"
@@ -5129,6 +5217,15 @@ version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/9e/bd/3704a8c3e0942d711c1299ebf7b9091930adae6675d7c8f476a7ce48653c/sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9", size = 5750 }

[[package]]
name = "six"
version = "1.17.0"
@@ -5454,6 +5551,19 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/6b/ed/8bc1d54387434f4c1b99a54721691444e9e249bb728a0da47b3150c756d6/sqlglotrs-0.3.0-cp312-none-win_amd64.whl", hash = "sha256:b9f308732f12331f06c53fcb1d7c2b135a43aa22486b4c88c26d42710f329448", size = 190557 },
]

[[package]]
name = "stack-data"
version = "0.6.3"
@@ -5898,6 +6008,21 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/fe/7b/7757205dee3628f75e7991021d15cd1bd0c9b044ca9affe99b50879fc0e1/triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb", size = 209464695 },
]

[[package]]
name = "types-colorama"
version = "0.4.15.20240311"
|
4158 |
]
|
4159 |
|
4160 |
+
[[package]]
|
4161 |
+
name = "pydantic-settings"
|
4162 |
+
version = "2.8.1"
|
4163 |
+
source = { registry = "https://pypi.org/simple" }
|
4164 |
+
dependencies = [
|
4165 |
+
{ name = "pydantic" },
|
4166 |
+
{ name = "python-dotenv" },
|
4167 |
+
]
|
4168 |
+
sdist = { url = "https://files.pythonhosted.org/packages/88/82/c79424d7d8c29b994fb01d277da57b0a9b09cc03c3ff875f9bd8a86b2145/pydantic_settings-2.8.1.tar.gz", hash = "sha256:d5c663dfbe9db9d5e1c646b2e161da12f0d734d422ee56f567d0ea2cee4e8585", size = 83550 }
|
4169 |
+
wheels = [
|
4170 |
+
{ url = "https://files.pythonhosted.org/packages/0b/53/a64f03044927dc47aafe029c42a5b7aabc38dfb813475e0e1bf71c4a59d0/pydantic_settings-2.8.1-py3-none-any.whl", hash = "sha256:81942d5ac3d905f7f3ee1a70df5dfb62d5569c12f51a5a647defc1c3d9ee2e9c", size = 30839 },
|
4171 |
+
]
|
4172 |
+
|
4173 |
[[package]]
|
4174 |
name = "pydub"
|
4175 |
version = "0.25.1"
|
|
|
4257 |
wheels = [
|
4258 |
{ url = "https://files.pythonhosted.org/packages/61/9b/98ef4b98309e9db3baa9fe572f0e61b6130bb9852d13189970f35b703499/pymupdf-1.25.3-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:96878e1b748f9c2011aecb2028c5f96b5a347a9a91169130ad0133053d97915e", size = 19343576 },
|
4259 |
{ url = "https://files.pythonhosted.org/packages/14/62/4e12126db174c8cfbf692281cda971cc4046c5f5226032c2cfaa6f83e08d/pymupdf-1.25.3-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:6ef753005b72ebfd23470f72f7e30f61e21b0b5e748045ec5b8f89e6e3068d62", size = 18580114 },
|
4260 |
+
{ url = "https://files.pythonhosted.org/packages/ec/c5/cf7ecf005e4f8ba3664d6aaa0613adeba4c2ab524832c452c69857e7184f/pymupdf-1.25.3-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cbff443d899f37b17f1e67563cc03673d50b4bf33ccc237e73d34f18f3a07ccf", size = 19442580 },
|
4261 |
{ url = "https://files.pythonhosted.org/packages/52/de/bd1418e31f73d37b8381cd5deacfd681e6be702b8890e123e83724569ee1/pymupdf-1.25.3-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46d90c4f9e62d1856e8db4b9f04a202ff4a7f086a816af73abdc86adb7f5e25a", size = 19999825 },
|
4262 |
{ url = "https://files.pythonhosted.org/packages/42/ee/3c449b0de061440ba1ac984aa845315e9e2dca0ff2003c5adfc6febff203/pymupdf-1.25.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a5de51efdbe4d486b6c1111c84e8a231cbfb426f3d6ff31ab530ad70e6f39756", size = 21123157 },
|
4263 |
{ url = "https://files.pythonhosted.org/packages/83/53/71faaaf91c56f2883b13f3dd849bf2697f012eb35eb7b952d62734cff41f/pymupdf-1.25.3-cp39-abi3-win32.whl", hash = "sha256:bca72e6089f985d800596e22973f79cc08af6cbff1d93e5bda9248326a03857c", size = 15094211 },
|
|
|
4840 |
{ url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490 },
|
4841 |
]
|
4842 |
|
4843 |
+
[[package]]
|
4844 |
+
name = "rich"
|
4845 |
+
version = "13.9.4"
|
4846 |
+
source = { registry = "https://pypi.org/simple" }
|
4847 |
+
dependencies = [
|
4848 |
+
{ name = "markdown-it-py" },
|
4849 |
+
{ name = "pygments" },
|
4850 |
+
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
|
4851 |
+
]
|
4852 |
+
sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 }
|
4853 |
+
wheels = [
|
4854 |
+
{ url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 },
|
4855 |
+
]
|
4856 |
+
|
4857 |
[[package]]
|
4858 |
name = "roman-numerals-py"
|
4859 |
version = "3.1.0"
|
|
|
5217 |
source = { registry = "https://pypi.org/simple" }
|
5218 |
sdist = { url = "https://files.pythonhosted.org/packages/9e/bd/3704a8c3e0942d711c1299ebf7b9091930adae6675d7c8f476a7ce48653c/sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9", size = 5750 }
|
5219 |
|
5220 |
+
[[package]]
|
5221 |
+
name = "shellingham"
|
5222 |
+
version = "1.5.4"
|
5223 |
+
source = { registry = "https://pypi.org/simple" }
|
5224 |
+
sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 }
|
5225 |
+
wheels = [
|
5226 |
+
{ url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 },
|
5227 |
+
]
|
5228 |
+
|
5229 |
[[package]]
|
5230 |
name = "six"
|
5231 |
version = "1.17.0"
|
|
|
5551 |
{ url = "https://files.pythonhosted.org/packages/6b/ed/8bc1d54387434f4c1b99a54721691444e9e249bb728a0da47b3150c756d6/sqlglotrs-0.3.0-cp312-none-win_amd64.whl", hash = "sha256:b9f308732f12331f06c53fcb1d7c2b135a43aa22486b4c88c26d42710f329448", size = 190557 },
|
5552 |
]
|
5553 |
|
5554 |
+
[[package]]
|
5555 |
+
name = "sse-starlette"
|
5556 |
+
version = "2.2.1"
|
5557 |
+
source = { registry = "https://pypi.org/simple" }
|
5558 |
+
dependencies = [
|
5559 |
+
{ name = "anyio" },
|
5560 |
+
{ name = "starlette" },
|
5561 |
+
]
|
5562 |
+
sdist = { url = "https://files.pythonhosted.org/packages/71/a4/80d2a11af59fe75b48230846989e93979c892d3a20016b42bb44edb9e398/sse_starlette-2.2.1.tar.gz", hash = "sha256:54470d5f19274aeed6b2d473430b08b4b379ea851d953b11d7f1c4a2c118b419", size = 17376 }
|
5563 |
+
wheels = [
|
5564 |
+
{ url = "https://files.pythonhosted.org/packages/d9/e0/5b8bd393f27f4a62461c5cf2479c75a2cc2ffa330976f9f00f5f6e4f50eb/sse_starlette-2.2.1-py3-none-any.whl", hash = "sha256:6410a3d3ba0c89e7675d4c273a301d64649c03a5ef1ca101f10b47f895fd0e99", size = 10120 },
|
5565 |
+
]
|
5566 |
+
|
5567 |
[[package]]
|
5568 |
name = "stack-data"
|
5569 |
version = "0.6.3"
|
|
|
6008 |
{ url = "https://files.pythonhosted.org/packages/fe/7b/7757205dee3628f75e7991021d15cd1bd0c9b044ca9affe99b50879fc0e1/triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb", size = 209464695 },
|
6009 |
]
|
6010 |
|
6011 |
+
[[package]]
|
6012 |
+
name = "typer"
|
6013 |
+
version = "0.15.2"
|
6014 |
+
source = { registry = "https://pypi.org/simple" }
|
6015 |
+
dependencies = [
|
6016 |
+
{ name = "click" },
|
6017 |
+
{ name = "rich" },
|
6018 |
+
{ name = "shellingham" },
|
6019 |
+
{ name = "typing-extensions" },
|
6020 |
+
]
|
6021 |
+
sdist = { url = "https://files.pythonhosted.org/packages/8b/6f/3991f0f1c7fcb2df31aef28e0594d8d54b05393a0e4e34c65e475c2a5d41/typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5", size = 100711 }
|
6022 |
+
wheels = [
|
6023 |
+
{ url = "https://files.pythonhosted.org/packages/7f/fc/5b29fea8cee020515ca82cc68e3b8e1e34bb19a3535ad854cac9257b414c/typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc", size = 45061 },
|
6024 |
+
]
|
6025 |
+
|
6026 |
[[package]]
|
6027 |
name = "types-colorama"
|
6028 |
version = "0.4.15.20240311"
|