Upload folder using huggingface_hub
Browse files
- .dockerignore +2 -0
- .env.example +56 -0
- .gitattributes +1 -0
- .github/workflows/update_space.yml +28 -0
- .gitignore +189 -0
- .vscode/settings.json +11 -0
- Dockerfile +42 -0
- Dockerfile.arm64 +85 -0
- Dockerfile.railway +42 -0
- LICENSE +21 -0
- README.md +239 -12
- SECURITY.md +19 -0
- assets/examples/test.png +3 -0
- assets/web-ui.png +0 -0
- docker-compose.yml +59 -0
- entrypoint.sh +4 -0
- requirements.txt +7 -0
- src/__init__.py +0 -0
- src/agent/__init__.py +0 -0
- src/agent/custom_agent.py +478 -0
- src/agent/custom_message_manager.py +111 -0
- src/agent/custom_prompts.py +125 -0
- src/agent/custom_system_prompt.md +80 -0
- src/agent/custom_views.py +67 -0
- src/browser/__init__.py +0 -0
- src/browser/custom_browser.py +28 -0
- src/browser/custom_context.py +19 -0
- src/controller/__init__.py +0 -0
- src/controller/custom_controller.py +49 -0
- src/utils/__init__.py +0 -0
- src/utils/agent_state.py +31 -0
- src/utils/deep_research.py +387 -0
- src/utils/llm.py +138 -0
- src/utils/utils.py +400 -0
- supervisord.conf +96 -0
- tests/test_browser_use.py +364 -0
- tests/test_deep_research.py +30 -0
- tests/test_llm_api.py +137 -0
- tests/test_playwright.py +31 -0
- webui.py +1203 -0
.dockerignore
ADDED
@@ -0,0 +1,2 @@
+data
+tmp
.env.example
ADDED
@@ -0,0 +1,56 @@
+OPENAI_ENDPOINT=https://api.openai.com/v1
+OPENAI_API_KEY=
+
+ANTHROPIC_API_KEY=
+ANTHROPIC_ENDPOINT=https://api.anthropic.com
+
+GOOGLE_API_KEY=
+
+AZURE_OPENAI_ENDPOINT=
+AZURE_OPENAI_API_KEY=
+AZURE_OPENAI_API_VERSION=2025-01-01-preview
+
+DEEPSEEK_ENDPOINT=https://api.deepseek.com
+DEEPSEEK_API_KEY=
+
+MISTRAL_API_KEY=
+MISTRAL_ENDPOINT=https://api.mistral.ai/v1
+
+OLLAMA_ENDPOINT=http://localhost:11434
+
+ALIBABA_ENDPOINT=https://dashscope.aliyuncs.com/compatible-mode/v1
+ALIBABA_API_KEY=
+
+MOONSHOT_ENDPOINT=https://api.moonshot.cn/v1
+MOONSHOT_API_KEY=
+
+UNBOUND_ENDPOINT=https://api.getunbound.ai
+UNBOUND_API_KEY=
+
+SiliconFLOW_ENDPOINT=https://api.siliconflow.cn/v1/
+SiliconFLOW_API_KEY=
+
+# Set to false to disable anonymized telemetry
+ANONYMIZED_TELEMETRY=false
+
+# LogLevel: Set to debug to enable verbose logging, set to result to get results only. Available: result | debug | info
+BROWSER_USE_LOGGING_LEVEL=info
+
+# Chrome settings
+CHROME_PATH=
+CHROME_USER_DATA=
+CHROME_DEBUGGING_PORT=9222
+CHROME_DEBUGGING_HOST=localhost
+# Set to true to keep browser open between AI tasks
+CHROME_PERSISTENT_SESSION=false
+CHROME_CDP=
+# Display settings
+# Format: WIDTHxHEIGHTxDEPTH
+RESOLUTION=1920x1080x24
+# Width in pixels
+RESOLUTION_WIDTH=1920
+# Height in pixels
+RESOLUTION_HEIGHT=1080
+
+# VNC settings
+VNC_PASSWORD=youvncpassword
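These values are consumed from a `.env` file at startup. As a rough illustration only (a minimal sketch assuming `python-dotenv` is available; the web UI's actual startup code in `webui.py` may wire this differently), they can be read like this:

```python
# Minimal sketch: read .env values named in .env.example above.
# Assumes python-dotenv is installed; unset keys fall back to defaults/empty strings.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

openai_endpoint = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
openai_api_key = os.getenv("OPENAI_API_KEY", "")
chrome_path = os.getenv("CHROME_PATH", "")

print(openai_endpoint, bool(openai_api_key), chrome_path or "<default Playwright Chromium>")
```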
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/examples/test.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2
+
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.9'
+
+    - name: Install Gradio
+      run: python -m pip install gradio
+
+    - name: Log in to Hugging Face
+      run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+    - name: Deploy to Spaces
+      run: gradio deploy
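The login step above is plain `huggingface_hub` usage; something like the following (a minimal local sketch, with `HF_TOKEN` as a placeholder environment variable standing in for the workflow's `secrets.hf_token`) reproduces it outside CI before running `gradio deploy`:

```python
# Minimal sketch of the workflow's Hugging Face login step, run locally.
# HF_TOKEN is a placeholder env var name (assumption); CI reads the token from secrets.hf_token.
import os
import huggingface_hub

huggingface_hub.login(token=os.environ["HF_TOKEN"])
# With the token cached, `gradio deploy` can push webui.py to the Space.
```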
.gitignore
ADDED
@@ -0,0 +1,189 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+test_env/
+myenv
+
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+temp
+tmp
+
+
+.DS_Store
+
+private_example.py
+private_example
+
+browser_cookies.json
+cookies.json
+AgentHistory.json
+cv_04_24.pdf
+AgentHistoryList.json
+*.gif
+
+# For Sharing (.pem files)
+.gradio/
+
+# For Docker
+data/
+
+# For Config Files (Current Settings)
+.config.pkl
.vscode/settings.json
ADDED
@@ -0,0 +1,11 @@
+{
+    "python.analysis.typeCheckingMode": "basic",
+    "[python]": {
+        "editor.defaultFormatter": "charliermarsh.ruff",
+        "editor.formatOnSave": true,
+        "editor.codeActionsOnSave": {
+            "source.fixAll.ruff": "explicit",
+            "source.organizeImports.ruff": "explicit"
+        }
+    }
+}
Dockerfile
ADDED
@@ -0,0 +1,42 @@
+# Build stage
+FROM python:3.11-slim as builder
+
+WORKDIR /app
+COPY requirements.txt .
+
+# Install dependencies in a virtual environment
+RUN python -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Runtime stage
+FROM python:3.11-slim
+
+# Install minimal runtime dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    libgomp1 \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+
+# Copy virtual environment from builder
+COPY --from=builder /opt/venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Copy application code
+COPY . .
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1
+ENV GRADIO_SERVER_NAME=0.0.0.0
+ENV GRADIO_SERVER_PORT=80
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
+    CMD curl -f http://localhost:80 || exit 1
+
+# Expose port
+EXPOSE 80
+
+# Run the application
+CMD ["python", "webui.py"]
Dockerfile.arm64
ADDED
@@ -0,0 +1,85 @@
+FROM python:3.11-slim
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    wget \
+    gnupg \
+    curl \
+    unzip \
+    xvfb \
+    libgconf-2-4 \
+    libxss1 \
+    libnss3 \
+    libnspr4 \
+    libasound2 \
+    libatk1.0-0 \
+    libatk-bridge2.0-0 \
+    libcups2 \
+    libdbus-1-3 \
+    libdrm2 \
+    libgbm1 \
+    libgtk-3-0 \
+    libxcomposite1 \
+    libxdamage1 \
+    libxfixes3 \
+    libxrandr2 \
+    xdg-utils \
+    fonts-liberation \
+    dbus \
+    xauth \
+    xvfb \
+    x11vnc \
+    tigervnc-tools \
+    supervisor \
+    net-tools \
+    procps \
+    git \
+    python3-numpy \
+    fontconfig \
+    fonts-dejavu \
+    fonts-dejavu-core \
+    fonts-dejavu-extra \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install noVNC
+RUN git clone https://github.com/novnc/noVNC.git /opt/novnc \
+    && git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \
+    && ln -s /opt/novnc/vnc.html /opt/novnc/index.html
+
+# Set platform explicitly for ARM64
+ARG TARGETPLATFORM=linux/arm64
+
+# Set up working directory
+WORKDIR /app
+
+# Copy requirements and install Python dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Install Playwright and browsers with system dependencies optimized for ARM64
+ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+RUN PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 pip install playwright && \
+    playwright install --with-deps chromium
+
+# Copy the application code
+COPY . .
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1
+ENV BROWSER_USE_LOGGING_LEVEL=info
+ENV CHROME_PATH=/ms-playwright/chromium-*/chrome-linux/chrome
+ENV ANONYMIZED_TELEMETRY=false
+ENV DISPLAY=:99
+ENV RESOLUTION=1920x1080x24
+ENV VNC_PASSWORD=vncpassword
+ENV CHROME_PERSISTENT_SESSION=true
+ENV RESOLUTION_WIDTH=1920
+ENV RESOLUTION_HEIGHT=1080
+
+# Set up supervisor configuration
+RUN mkdir -p /var/log/supervisor
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+EXPOSE 7788 6080 5901
+
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
Dockerfile.railway
ADDED
@@ -0,0 +1,42 @@
+# Build stage
+FROM python:3.11-slim as builder
+
+WORKDIR /app
+COPY requirements.txt .
+
+# Install dependencies in a virtual environment
+RUN python -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Runtime stage
+FROM python:3.11-slim
+
+# Install minimal runtime dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    libgomp1 \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+
+# Copy virtual environment from builder
+COPY --from=builder /opt/venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Copy application code
+COPY . .
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1
+ENV GRADIO_SERVER_NAME=0.0.0.0
+ENV GRADIO_SERVER_PORT=80
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
+    CMD curl -f http://localhost:80 || exit 1
+
+# Expose port
+EXPOSE 80
+
+# Run the application
+CMD ["python", "webui.py"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Browser Use Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,239 @@
----
-title:
-
-
-
-
-
-
-
-
-
-
+---
+title: browser-use-sg
+app_file: webui.py
+sdk: gradio
+sdk_version: 5.23.1
+---
+<img src="./assets/web-ui.png" alt="Browser Use Web UI" width="full"/>
+
+<br/>
+
+[](https://github.com/browser-use/web-ui/stargazers)
+[](https://link.browser-use.com/discord)
+[](https://docs.browser-use.com)
+[](https://x.com/warmshao)
+
+This project builds upon the foundation of the [browser-use](https://github.com/browser-use/browser-use), which is designed to make websites accessible for AI agents.
+
+We would like to officially thank [WarmShao](https://github.com/warmshao) for his contribution to this project.
+
+**WebUI:** is built on Gradio and supports most of `browser-use` functionalities. This UI is designed to be user-friendly and enables easy interaction with the browser agent.
+
+**Expanded LLM Support:** We've integrated support for various Large Language Models (LLMs), including: Google, OpenAI, Azure OpenAI, Anthropic, DeepSeek, Ollama etc. And we plan to add support for even more models in the future.
+
+**Custom Browser Support:** You can use your own browser with our tool, eliminating the need to re-login to sites or deal with other authentication challenges. This feature also supports high-definition screen recording.
+
+**Persistent Browser Sessions:** You can choose to keep the browser window open between AI tasks, allowing you to see the complete history and state of AI interactions.
+
+<video src="https://github.com/user-attachments/assets/56bc7080-f2e3-4367-af22-6bf2245ff6cb" controls="controls">Your browser does not support playing this video!</video>
+
+## Installation Guide
+
+### Prerequisites
+- Python 3.11 or higher
+- Git (for cloning the repository)
+
+### Option 1: Local Installation
+
+Read the [quickstart guide](https://docs.browser-use.com/quickstart#prepare-the-environment) or follow the steps below to get started.
+
+#### Step 1: Clone the Repository
+```bash
+git clone https://github.com/browser-use/web-ui.git
+cd web-ui
+```
+
+#### Step 2: Set Up Python Environment
+We recommend using [uv](https://docs.astral.sh/uv/) for managing the Python environment.
+
+Using uv (recommended):
+```bash
+uv venv --python 3.11
+```
+
+Activate the virtual environment:
+- Windows (Command Prompt):
+```cmd
+.venv\Scripts\activate
+```
+- Windows (PowerShell):
+```powershell
+.\.venv\Scripts\Activate.ps1
+```
+- macOS/Linux:
+```bash
+source .venv/bin/activate
+```
+
+#### Step 3: Install Dependencies
+Install Python packages:
+```bash
+uv pip install -r requirements.txt
+```
+
+Install Browsers in Playwright:
+You can install specific browsers by running:
+```bash
+playwright install --with-deps chromium
+```
+
+To install all browsers:
+```bash
+playwright install
+```
+
+#### Step 4: Configure Environment
+1. Create a copy of the example environment file:
+   - Windows (Command Prompt):
+     ```bash
+     copy .env.example .env
+     ```
+   - macOS/Linux/Windows (PowerShell):
+     ```bash
+     cp .env.example .env
+     ```
+2. Open `.env` in your preferred text editor and add your API keys and other settings
+
+### Option 2: Docker Installation
+
+#### Prerequisites
+- Docker and Docker Compose installed
+  - [Docker Desktop](https://www.docker.com/products/docker-desktop/) (For Windows/macOS)
+  - [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) (For Linux)
+
+#### Installation Steps
+1. Clone the repository:
+   ```bash
+   git clone https://github.com/browser-use/web-ui.git
+   cd web-ui
+   ```
+
+2. Create and configure environment file:
+   - Windows (Command Prompt):
+     ```bash
+     copy .env.example .env
+     ```
+   - macOS/Linux/Windows (PowerShell):
+     ```bash
+     cp .env.example .env
+     ```
+   Edit `.env` with your preferred text editor and add your API keys
+
+3. Run with Docker:
+   ```bash
+   # Build and start the container with default settings (browser closes after AI tasks)
+   docker compose up --build
+   ```
+   ```bash
+   # Or run with persistent browser (browser stays open between AI tasks)
+   CHROME_PERSISTENT_SESSION=true docker compose up --build
+   ```
+
+
+4. Access the Application:
+   - Web Interface: Open `http://localhost:7788` in your browser
+   - VNC Viewer (for watching browser interactions): Open `http://localhost:6080/vnc.html`
+     - Default VNC password: "youvncpassword"
+     - Can be changed by setting `VNC_PASSWORD` in your `.env` file
+
+## Usage
+
+### Local Setup
+1. **Run the WebUI:**
+   After completing the installation steps above, start the application:
+   ```bash
+   python webui.py --ip 127.0.0.1 --port 7788
+   ```
+2. WebUI options:
+   - `--ip`: The IP address to bind the WebUI to. Default is `127.0.0.1`.
+   - `--port`: The port to bind the WebUI to. Default is `7788`.
+   - `--theme`: The theme for the user interface. Default is `Ocean`.
+     - **Default**: The standard theme with a balanced design.
+     - **Soft**: A gentle, muted color scheme for a relaxed viewing experience.
+     - **Monochrome**: A grayscale theme with minimal color for simplicity and focus.
+     - **Glass**: A sleek, semi-transparent design for a modern appearance.
+     - **Origin**: A classic, retro-inspired theme for a nostalgic feel.
+     - **Citrus**: A vibrant, citrus-inspired palette with bright and fresh colors.
+     - **Ocean** (default): A blue, ocean-inspired theme providing a calming effect.
+   - `--dark-mode`: Enables dark mode for the user interface.
+3. **Access the WebUI:** Open your web browser and navigate to `http://127.0.0.1:7788`.
+4. **Using Your Own Browser(Optional):**
+   - Set `CHROME_PATH` to the executable path of your browser and `CHROME_USER_DATA` to the user data directory of your browser. Leave `CHROME_USER_DATA` empty if you want to use local user data.
+     - Windows
+       ```env
+       CHROME_PATH="C:\Program Files\Google\Chrome\Application\chrome.exe"
+       CHROME_USER_DATA="C:\Users\YourUsername\AppData\Local\Google\Chrome\User Data"
+       ```
+       > Note: Replace `YourUsername` with your actual Windows username for Windows systems.
+     - Mac
+       ```env
+       CHROME_PATH="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
+       CHROME_USER_DATA="/Users/YourUsername/Library/Application Support/Google/Chrome"
+       ```
+   - Close all Chrome windows
+   - Open the WebUI in a non-Chrome browser, such as Firefox or Edge. This is important because the persistent browser context will use the Chrome data when running the agent.
+   - Check the "Use Own Browser" option within the Browser Settings.
+5. **Keep Browser Open(Optional):**
+   - Set `CHROME_PERSISTENT_SESSION=true` in the `.env` file.
+
+### Docker Setup
+1. **Environment Variables:**
+   - All configuration is done through the `.env` file
+   - Available environment variables:
+     ```
+     # LLM API Keys
+     OPENAI_API_KEY=your_key_here
+     ANTHROPIC_API_KEY=your_key_here
+     GOOGLE_API_KEY=your_key_here
+
+     # Browser Settings
+     CHROME_PERSISTENT_SESSION=true   # Set to true to keep browser open between AI tasks
+     RESOLUTION=1920x1080x24          # Custom resolution format: WIDTHxHEIGHTxDEPTH
+     RESOLUTION_WIDTH=1920            # Custom width in pixels
+     RESOLUTION_HEIGHT=1080           # Custom height in pixels
+
+     # VNC Settings
+     VNC_PASSWORD=your_vnc_password   # Optional, defaults to "vncpassword"
+     ```
+
+2. **Platform Support:**
+   - Supports both AMD64 and ARM64 architectures
+   - For ARM64 systems (e.g., Apple Silicon Macs), the container will automatically use the appropriate image
+
+3. **Browser Persistence Modes:**
+   - **Default Mode (CHROME_PERSISTENT_SESSION=false):**
+     - Browser opens and closes with each AI task
+     - Clean state for each interaction
+     - Lower resource usage
+
+   - **Persistent Mode (CHROME_PERSISTENT_SESSION=true):**
+     - Browser stays open between AI tasks
+     - Maintains history and state
+     - Allows viewing previous AI interactions
+     - Set in `.env` file or via environment variable when starting container
+
+4. **Viewing Browser Interactions:**
+   - Access the noVNC viewer at `http://localhost:6080/vnc.html`
+   - Enter the VNC password (default: "vncpassword" or what you set in VNC_PASSWORD)
+   - Direct VNC access available on port 5900 (mapped to container port 5901)
+   - You can now see all browser interactions in real-time
+
+5. **Container Management:**
+   ```bash
+   # Start with persistent browser
+   CHROME_PERSISTENT_SESSION=true docker compose up -d
+
+   # Start with default mode (browser closes after tasks)
+   docker compose up -d
+
+   # View logs
+   docker compose logs -f
+
+   # Stop the container
+   docker compose down
+   ```
+
+## Changelog
+- [x] **2025/01/26:** Thanks to @vvincent1234. Now browser-use-webui can combine with DeepSeek-r1 to engage in deep thinking!
+- [x] **2025/01/10:** Thanks to @casistack. Now we have Docker Setup option and also Support keep browser open between tasks.[Video tutorial demo](https://github.com/browser-use/web-ui/issues/1#issuecomment-2582511750).
+- [x] **2025/01/06:** Thanks to @richard-devbot. A New and Well-Designed WebUI is released. [Video tutorial demo](https://github.com/warmshao/browser-use-webui/issues/1#issuecomment-2573393113).
SECURITY.md
ADDED
@@ -0,0 +1,19 @@
+## Reporting Security Issues
+
+If you believe you have found a security vulnerability in browser-use, please report it through coordinated disclosure.
+
+**Please do not report security vulnerabilities through the repository issues, discussions, or pull requests.**
+
+Instead, please open a new [Github security advisory](https://github.com/browser-use/web-ui/security/advisories/new).
+
+Please include as much of the information listed below as you can to help me better understand and resolve the issue:
+
+* The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting)
+* Full paths of source file(s) related to the manifestation of the issue
+* The location of the affected source code (tag/branch/commit or direct URL)
+* Any special configuration required to reproduce the issue
+* Step-by-step instructions to reproduce the issue
+* Proof-of-concept or exploit code (if possible)
+* Impact of the issue, including how an attacker might exploit the issue
+
+This information will help me triage your report more quickly.
assets/examples/test.png
ADDED
(binary image, stored with Git LFS)
assets/web-ui.png
ADDED
(binary image)
docker-compose.yml
ADDED
@@ -0,0 +1,59 @@
+services:
+  browser-use-webui:
+    platform: linux/amd64
+    build:
+      context: .
+      dockerfile: ${DOCKERFILE:-Dockerfile}
+      args:
+        TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64}
+    ports:
+      - "7788:7788"  # Gradio default port
+      - "6080:6080"  # noVNC web interface
+      - "5901:5901"  # VNC port
+      - "9222:9222"  # Chrome remote debugging port
+    environment:
+      - OPENAI_ENDPOINT=${OPENAI_ENDPOINT:-https://api.openai.com/v1}
+      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
+      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
+      - ANTHROPIC_ENDPOINT=${ANTHROPIC_ENDPOINT:-https://api.anthropic.com}
+      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
+      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
+      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
+      - DEEPSEEK_ENDPOINT=${DEEPSEEK_ENDPOINT:-https://api.deepseek.com}
+      - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
+      - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-http://localhost:11434}
+      - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
+      - MISTRAL_ENDPOINT=${MISTRAL_ENDPOINT:-https://api.mistral.ai/v1}
+      - ALIBABA_ENDPOINT=${ALIBABA_ENDPOINT:-https://dashscope.aliyuncs.com/compatible-mode/v1}
+      - ALIBABA_API_KEY=${ALIBABA_API_KEY:-}
+      - MOONSHOT_ENDPOINT=${MOONSHOT_ENDPOINT:-https://api.moonshot.cn/v1}
+      - MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-}
+      - BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info}
+      - ANONYMIZED_TELEMETRY=${ANONYMIZED_TELEMETRY:-false}
+      - CHROME_PATH=/usr/bin/google-chrome
+      - CHROME_USER_DATA=/app/data/chrome_data
+      - CHROME_PERSISTENT_SESSION=${CHROME_PERSISTENT_SESSION:-false}
+      - CHROME_CDP=${CHROME_CDP:-http://localhost:9222}
+      - DISPLAY=:99
+      - PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+      - RESOLUTION=${RESOLUTION:-1920x1080x24}
+      - RESOLUTION_WIDTH=${RESOLUTION_WIDTH:-1920}
+      - RESOLUTION_HEIGHT=${RESOLUTION_HEIGHT:-1080}
+      - VNC_PASSWORD=${VNC_PASSWORD:-vncpassword}
+      - CHROME_DEBUGGING_PORT=9222
+      - CHROME_DEBUGGING_HOST=localhost
+    volumes:
+      - /tmp/.X11-unix:/tmp/.X11-unix
+    restart: unless-stopped
+    shm_size: '2gb'
+    cap_add:
+      - SYS_ADMIN
+    security_opt:
+      - seccomp=unconfined
+    tmpfs:
+      - /tmp
+    healthcheck:
+      test: ["CMD", "nc", "-z", "localhost", "5901"]
+      interval: 10s
+      timeout: 5s
+      retries: 3
entrypoint.sh
ADDED
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# Start supervisord in the foreground to properly manage child processes
+exec /usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+browser-use==0.1.40
+pyperclip==1.9.0
+gradio==5.23.1
+json-repair
+langchain-mistralai==0.2.4
+langchain-google-genai==2.0.8
+MainContentExtractor==0.0.4
src/__init__.py
ADDED
File without changes
src/agent/__init__.py
ADDED
File without changes
src/agent/custom_agent.py
ADDED
@@ -0,0 +1,478 @@
+import json
+import logging
+import pdb
+import traceback
+from typing import Any, Awaitable, Callable, Dict, Generic, List, Optional, Type, TypeVar
+from PIL import Image, ImageDraw, ImageFont
+import os
+import base64
+import io
+import asyncio
+import time
+import platform
+from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
+from browser_use.agent.service import Agent
+from browser_use.agent.message_manager.utils import convert_input_messages, extract_json_from_model_output, \
+    save_conversation
+from browser_use.agent.views import (
+    ActionResult,
+    AgentError,
+    AgentHistory,
+    AgentHistoryList,
+    AgentOutput,
+    AgentSettings,
+    AgentState,
+    AgentStepInfo,
+    StepMetadata,
+    ToolCallingMethod,
+)
+from browser_use.agent.gif import create_history_gif
+from browser_use.browser.browser import Browser
+from browser_use.browser.context import BrowserContext
+from browser_use.browser.views import BrowserStateHistory
+from browser_use.controller.service import Controller
+from browser_use.telemetry.views import (
+    AgentEndTelemetryEvent,
+    AgentRunTelemetryEvent,
+    AgentStepTelemetryEvent,
+)
+from browser_use.utils import time_execution_async
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import (
+    BaseMessage,
+    HumanMessage,
+    AIMessage
+)
+from browser_use.browser.views import BrowserState, BrowserStateHistory
+from browser_use.agent.prompts import PlannerPrompt
+
+from json_repair import repair_json
+from src.utils.agent_state import AgentState
+
+from .custom_message_manager import CustomMessageManager, CustomMessageManagerSettings
+from .custom_views import CustomAgentOutput, CustomAgentStepInfo, CustomAgentState
+
+logger = logging.getLogger(__name__)
+
+Context = TypeVar('Context')
+
+
+class CustomAgent(Agent):
+    def __init__(
+            self,
+            task: str,
+            llm: BaseChatModel,
+            add_infos: str = "",
+            # Optional parameters
+            browser: Browser | None = None,
+            browser_context: BrowserContext | None = None,
+            controller: Controller[Context] = Controller(),
+            # Initial agent run parameters
+            sensitive_data: Optional[Dict[str, str]] = None,
+            initial_actions: Optional[List[Dict[str, Dict[str, Any]]]] = None,
+            # Cloud Callbacks
+            register_new_step_callback: Callable[['BrowserState', 'AgentOutput', int], Awaitable[None]] | None = None,
+            register_done_callback: Callable[['AgentHistoryList'], Awaitable[None]] | None = None,
+            register_external_agent_status_raise_error_callback: Callable[[], Awaitable[bool]] | None = None,
+            # Agent settings
+            use_vision: bool = True,
+            use_vision_for_planner: bool = False,
+            save_conversation_path: Optional[str] = None,
+            save_conversation_path_encoding: Optional[str] = 'utf-8',
+            max_failures: int = 3,
+            retry_delay: int = 10,
+            system_prompt_class: Type[SystemPrompt] = SystemPrompt,
+            agent_prompt_class: Type[AgentMessagePrompt] = AgentMessagePrompt,
+            max_input_tokens: int = 128000,
+            validate_output: bool = False,
+            message_context: Optional[str] = None,
+            generate_gif: bool | str = False,
+            available_file_paths: Optional[list[str]] = None,
+            include_attributes: list[str] = [
+                'title',
+                'type',
+                'name',
+                'role',
+                'aria-label',
+                'placeholder',
+                'value',
+                'alt',
+                'aria-expanded',
+                'data-date-format',
+            ],
+            max_actions_per_step: int = 10,
+            tool_calling_method: Optional[ToolCallingMethod] = 'auto',
+            page_extraction_llm: Optional[BaseChatModel] = None,
+            planner_llm: Optional[BaseChatModel] = None,
+            planner_interval: int = 1,  # Run planner every N steps
+            # Inject state
+            injected_agent_state: Optional[AgentState] = None,
+            context: Context | None = None,
+    ):
+        super(CustomAgent, self).__init__(
+            task=task,
+            llm=llm,
+            browser=browser,
+            browser_context=browser_context,
+            controller=controller,
+            sensitive_data=sensitive_data,
+            initial_actions=initial_actions,
+            register_new_step_callback=register_new_step_callback,
+            register_done_callback=register_done_callback,
+            register_external_agent_status_raise_error_callback=register_external_agent_status_raise_error_callback,
+            use_vision=use_vision,
+            use_vision_for_planner=use_vision_for_planner,
+            save_conversation_path=save_conversation_path,
+            save_conversation_path_encoding=save_conversation_path_encoding,
+            max_failures=max_failures,
+            retry_delay=retry_delay,
+            system_prompt_class=system_prompt_class,
+            max_input_tokens=max_input_tokens,
+            validate_output=validate_output,
+            message_context=message_context,
+            generate_gif=generate_gif,
+            available_file_paths=available_file_paths,
+            include_attributes=include_attributes,
+            max_actions_per_step=max_actions_per_step,
+            tool_calling_method=tool_calling_method,
+            page_extraction_llm=page_extraction_llm,
+            planner_llm=planner_llm,
+            planner_interval=planner_interval,
+            injected_agent_state=injected_agent_state,
+            context=context,
+        )
+        self.state = injected_agent_state or CustomAgentState()
+        self.add_infos = add_infos
+        self._message_manager = CustomMessageManager(
+            task=task,
+            system_message=self.settings.system_prompt_class(
+                self.available_actions,
+                max_actions_per_step=self.settings.max_actions_per_step,
+            ).get_system_message(),
+            settings=CustomMessageManagerSettings(
+                max_input_tokens=self.settings.max_input_tokens,
+                include_attributes=self.settings.include_attributes,
+                message_context=self.settings.message_context,
+                sensitive_data=sensitive_data,
+                available_file_paths=self.settings.available_file_paths,
+                agent_prompt_class=agent_prompt_class
+            ),
+            state=self.state.message_manager_state,
+        )
+
+    def _log_response(self, response: CustomAgentOutput) -> None:
+        """Log the model's response"""
+        if "Success" in response.current_state.evaluation_previous_goal:
+            emoji = "✅"
+        elif "Failed" in response.current_state.evaluation_previous_goal:
+            emoji = "❌"
+        else:
+            emoji = "🤷"
+
+        logger.info(f"{emoji} Eval: {response.current_state.evaluation_previous_goal}")
+        logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
+        logger.info(f"🤔 Thought: {response.current_state.thought}")
+        logger.info(f"🎯 Next Goal: {response.current_state.next_goal}")
+        for i, action in enumerate(response.action):
+            logger.info(
+                f"🛠️ Action {i + 1}/{len(response.action)}: {action.model_dump_json(exclude_unset=True)}"
+            )
+
+    def _setup_action_models(self) -> None:
+        """Setup dynamic action models from controller's registry"""
+        # Get the dynamic action model from controller's registry
+        self.ActionModel = self.controller.registry.create_action_model()
+        # Create output model with the dynamic actions
+        self.AgentOutput = CustomAgentOutput.type_with_custom_actions(self.ActionModel)
+
+    def update_step_info(
+            self, model_output: CustomAgentOutput, step_info: CustomAgentStepInfo = None
+    ):
+        """
+        update step info
+        """
+        if step_info is None:
+            return
+
+        step_info.step_number += 1
+        important_contents = model_output.current_state.important_contents
+        if (
+                important_contents
+                and "None" not in important_contents
+                and important_contents not in step_info.memory
+        ):
+            step_info.memory += important_contents + "\n"
+
+        logger.info(f"🧠 All Memory: \n{step_info.memory}")
+
+    @time_execution_async("--get_next_action")
+    async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
+        """Get next action from LLM based on current state"""
+        fixed_input_messages = self._convert_input_messages(input_messages)
+        ai_message = self.llm.invoke(fixed_input_messages)
+        self.message_manager._add_message_with_tokens(ai_message)
+
+        if hasattr(ai_message, "reasoning_content"):
+            logger.info("🤯 Start Deep Thinking: ")
+            logger.info(ai_message.reasoning_content)
+            logger.info("🤯 End Deep Thinking")
+
+        if isinstance(ai_message.content, list):
+            ai_content = ai_message.content[0]
+        else:
+            ai_content = ai_message.content
+
+        try:
+            ai_content = ai_content.replace("```json", "").replace("```", "")
+            ai_content = repair_json(ai_content)
+            parsed_json = json.loads(ai_content)
+            parsed: AgentOutput = self.AgentOutput(**parsed_json)
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            logger.debug(ai_message.content)
+            raise ValueError('Could not parse response.')
+
+        if parsed is None:
+            logger.debug(ai_message.content)
+            raise ValueError('Could not parse response.')
+
+        # cut the number of actions to max_actions_per_step if needed
+        if len(parsed.action) > self.settings.max_actions_per_step:
+            parsed.action = parsed.action[: self.settings.max_actions_per_step]
+        self._log_response(parsed)
+        return parsed
+
+    async def _run_planner(self) -> Optional[str]:
+        """Run the planner to analyze state and suggest next steps"""
+        # Skip planning if no planner_llm is set
+        if not self.settings.planner_llm:
+            return None
+
+        # Create planner message history using full message history
+        planner_messages = [
+            PlannerPrompt(self.controller.registry.get_prompt_description()).get_system_message(),
+            *self.message_manager.get_messages()[1:],  # Use full message history except the first
+        ]
+
+        if not self.settings.use_vision_for_planner and self.settings.use_vision:
+            last_state_message: HumanMessage = planner_messages[-1]
+            # remove image from last state message
+            new_msg = ''
+            if isinstance(last_state_message.content, list):
+                for msg in last_state_message.content:
+                    if msg['type'] == 'text':
+                        new_msg += msg['text']
+                    elif msg['type'] == 'image_url':
+                        continue
+            else:
+                new_msg = last_state_message.content
+
+            planner_messages[-1] = HumanMessage(content=new_msg)
+
+        # Get planner output
+        response = await self.settings.planner_llm.ainvoke(planner_messages)
+        plan = str(response.content)
+        last_state_message = self.message_manager.get_messages()[-1]
+        if isinstance(last_state_message, HumanMessage):
+            # remove image from last state message
+            if isinstance(last_state_message.content, list):
+                for msg in last_state_message.content:
+                    if msg['type'] == 'text':
+                        msg['text'] += f"\nPlanning Agent outputs plans:\n {plan}\n"
+            else:
+                last_state_message.content += f"\nPlanning Agent outputs plans:\n {plan}\n "
+
+        try:
+            plan_json = json.loads(plan.replace("```json", "").replace("```", ""))
+            logger.info(f'📋 Plans:\n{json.dumps(plan_json, indent=4)}')
+
+            if hasattr(response, "reasoning_content"):
+                logger.info("🤯 Start Planning Deep Thinking: ")
+                logger.info(response.reasoning_content)
+                logger.info("🤯 End Planning Deep Thinking")
+
+        except json.JSONDecodeError:
+            logger.info(f'📋 Plans:\n{plan}')
+        except Exception as e:
+            logger.debug(f'Error parsing planning analysis: {e}')
+            logger.info(f'📋 Plans: {plan}')
+        return plan
+
+    @time_execution_async("--step")
+    async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
+        """Execute one step of the task"""
+        logger.info(f"\n📍 Step {self.state.n_steps}")
+        state = None
+        model_output = None
+        result: list[ActionResult] = []
+        step_start_time = time.time()
+        tokens = 0
+
+        try:
+            state = await self.browser_context.get_state()
+            await self._raise_if_stopped_or_paused()
+
+            self.message_manager.add_state_message(state, self.state.last_action, self.state.last_result, step_info,
+                                                   self.settings.use_vision)
+
+            # Run planner at specified intervals if planner is configured
+            if self.settings.planner_llm and self.state.n_steps % self.settings.planner_interval == 0:
+                await self._run_planner()
+            input_messages = self.message_manager.get_messages()
+            tokens = self._message_manager.state.history.current_tokens
+
+            try:
+                model_output = await self.get_next_action(input_messages)
+                self.update_step_info(model_output, step_info)
+                self.state.n_steps += 1
+
+                if self.register_new_step_callback:
+                    await self.register_new_step_callback(state, model_output, self.state.n_steps)
+
+                if self.settings.save_conversation_path:
+                    target = self.settings.save_conversation_path + f'_{self.state.n_steps}.txt'
+                    save_conversation(input_messages, model_output, target,
+                                      self.settings.save_conversation_path_encoding)
+
+                if self.model_name != "deepseek-reasoner":
+                    # remove prev message
+                    self.message_manager._remove_state_message_by_index(-1)
+                await self._raise_if_stopped_or_paused()
+            except Exception as e:
+                # model call failed, remove last state message from history
+                self.message_manager._remove_state_message_by_index(-1)
+                raise e
+
+            result: list[ActionResult] = await self.multi_act(model_output.action)
+            for ret_ in result:
+                if ret_.extracted_content and "Extracted page" in ret_.extracted_content:
+                    # record every extracted page
+                    if ret_.extracted_content[:100] not in self.state.extracted_content:
+                        self.state.extracted_content += ret_.extracted_content
+            self.state.last_result = result
+            self.state.last_action = model_output.action
+            if len(result) > 0 and result[-1].is_done:
+                if not self.state.extracted_content:
+                    self.state.extracted_content = step_info.memory
+                result[-1].extracted_content = self.state.extracted_content
+                logger.info(f"📄 Result: {result[-1].extracted_content}")
+
+            self.state.consecutive_failures = 0
+
+        except InterruptedError:
+            logger.debug('Agent paused')
+            self.state.last_result = [
+                ActionResult(
+                    error='The agent was paused - now continuing actions might need to be repeated',
+                    include_in_memory=True
+                )
+            ]
+            return
+
+        except Exception as e:
+            result = await self._handle_step_error(e)
+            self.state.last_result = result
+
+        finally:
+            step_end_time = time.time()
+            actions = [a.model_dump(exclude_unset=True) for a in model_output.action] if model_output else []
+            self.telemetry.capture(
+                AgentStepTelemetryEvent(
+                    agent_id=self.state.agent_id,
+                    step=self.state.n_steps,
+                    actions=actions,
+                    consecutive_failures=self.state.consecutive_failures,
+                    step_error=[r.error for r in result if r.error] if result else ['No result'],
+                )
+            )
+            if not result:
+                return
+
+            if state:
+                metadata = StepMetadata(
+                    step_number=self.state.n_steps,
+                    step_start_time=step_start_time,
+                    step_end_time=step_end_time,
+                    input_tokens=tokens,
+                )
+                self._make_history_item(model_output, state, result, metadata)
+
+    async def run(self, max_steps: int = 100) -> AgentHistoryList:
+        """Execute the task with maximum number of steps"""
+        try:
+            self._log_agent_run()
+
+            # Execute initial actions if provided
+            if self.initial_actions:
+                result = await self.multi_act(self.initial_actions, check_for_new_elements=False)
+                self.state.last_result = result
+
+            step_info = CustomAgentStepInfo(
+                task=self.task,
+                add_infos=self.add_infos,
+                step_number=1,
+                max_steps=max_steps,
+                memory="",
+            )
+
+            for step in range(max_steps):
+                # Check if we should stop due to too many failures
+                if self.state.consecutive_failures >= self.settings.max_failures:
+                    logger.error(f'❌ Stopping due to {self.settings.max_failures} consecutive failures')
+                    break
+
+                # Check control flags before each step
+                if self.state.stopped:
+                    logger.info('Agent stopped')
+                    break
+
+                while self.state.paused:
+                    await asyncio.sleep(0.2)  # Small delay to prevent CPU spinning
+                    if self.state.stopped:  # Allow stopping while paused
+                        break
+
+                await self.step(step_info)
+
+                if self.state.history.is_done():
+                    if self.settings.validate_output and step < max_steps - 1:
+                        if not await self._validate_output():
+                            continue
+
+                    await self.log_completion()
+                    break
+            else:
+                logger.info("❌ Failed to complete task in maximum steps")
+                if not self.state.extracted_content:
+                    self.state.history.history[-1].result[-1].extracted_content = step_info.memory
+                else:
+                    self.state.history.history[-1].result[-1].extracted_content = self.state.extracted_content
+
+            return self.state.history
+
+        finally:
+            self.telemetry.capture(
+                AgentEndTelemetryEvent(
+                    agent_id=self.state.agent_id,
+                    is_done=self.state.history.is_done(),
+                    success=self.state.history.is_successful(),
+                    steps=self.state.n_steps,
+                    max_steps_reached=self.state.n_steps >= max_steps,
+                    errors=self.state.history.errors(),
+                    total_input_tokens=self.state.history.total_input_tokens(),
+                    total_duration_seconds=self.state.history.total_duration_seconds(),
+                )
+            )
+
+            if not self.injected_browser_context:
+                await self.browser_context.close()
+
+            if not self.injected_browser and self.browser:
+                await self.browser.close()
+
+            if self.settings.generate_gif:
+                output_path: str = 'agent_history.gif'
+                if isinstance(self.settings.generate_gif, str):
+                    output_path = self.settings.generate_gif
+
+                create_history_gif(task=self.task, history=self.state.history, output_path=output_path)
src/agent/custom_message_manager.py
ADDED
@@ -0,0 +1,111 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import logging
|
4 |
+
import pdb
|
5 |
+
from typing import List, Optional, Type, Dict
|
6 |
+
|
7 |
+
from browser_use.agent.message_manager.service import MessageManager
|
8 |
+
from browser_use.agent.message_manager.views import MessageHistory
|
9 |
+
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
|
10 |
+
from browser_use.agent.views import ActionResult, AgentStepInfo, ActionModel
|
11 |
+
from browser_use.browser.views import BrowserState
|
12 |
+
from browser_use.agent.message_manager.service import MessageManagerSettings
|
13 |
+
from browser_use.agent.views import ActionResult, AgentOutput, AgentStepInfo, MessageManagerState
|
14 |
+
from langchain_core.language_models import BaseChatModel
|
15 |
+
from langchain_anthropic import ChatAnthropic
|
16 |
+
from langchain_core.language_models import BaseChatModel
|
17 |
+
from langchain_core.messages import (
|
18 |
+
AIMessage,
|
19 |
+
BaseMessage,
|
20 |
+
HumanMessage,
|
21 |
+
ToolMessage,
|
22 |
+
SystemMessage
|
23 |
+
)
|
24 |
+
from langchain_openai import ChatOpenAI
|
25 |
+
from ..utils.llm import DeepSeekR1ChatOpenAI
|
26 |
+
from .custom_prompts import CustomAgentMessagePrompt
|
27 |
+
|
28 |
+
logger = logging.getLogger(__name__)
|
29 |
+
|
30 |
+
|
31 |
+
class CustomMessageManagerSettings(MessageManagerSettings):
|
32 |
+
agent_prompt_class: Type[AgentMessagePrompt] = AgentMessagePrompt
|
33 |
+
|
34 |
+
|
35 |
+
class CustomMessageManager(MessageManager):
|
36 |
+
def __init__(
|
37 |
+
self,
|
38 |
+
task: str,
|
39 |
+
system_message: SystemMessage,
|
40 |
+
settings: MessageManagerSettings = MessageManagerSettings(),
|
41 |
+
state: MessageManagerState = MessageManagerState(),
|
42 |
+
):
|
43 |
+
super().__init__(
|
44 |
+
task=task,
|
45 |
+
system_message=system_message,
|
46 |
+
settings=settings,
|
47 |
+
state=state
|
48 |
+
)
|
49 |
+
|
50 |
+
def _init_messages(self) -> None:
|
51 |
+
"""Initialize the message history with system message, context, task, and other initial messages"""
|
52 |
+
self._add_message_with_tokens(self.system_prompt)
|
53 |
+
self.context_content = ""
|
54 |
+
|
55 |
+
if self.settings.message_context:
|
56 |
+
self.context_content += 'Context for the task' + self.settings.message_context
|
57 |
+
|
58 |
+
if self.settings.sensitive_data:
|
59 |
+
info = f'Here are placeholders for sensitive data: {list(self.settings.sensitive_data.keys())}'
|
60 |
+
info += 'To use them, write <secret>the placeholder name</secret>'
|
61 |
+
self.context_content += info
|
62 |
+
|
63 |
+
if self.settings.available_file_paths:
|
64 |
+
filepaths_msg = f'Here are file paths you can use: {self.settings.available_file_paths}'
|
65 |
+
self.context_content += filepaths_msg
|
66 |
+
|
67 |
+
if self.context_content:
|
68 |
+
context_message = HumanMessage(content=self.context_content)
|
69 |
+
self._add_message_with_tokens(context_message)
|
70 |
+
|
71 |
+
def cut_messages(self):
|
72 |
+
"""Get current message list, potentially trimmed to max tokens"""
|
73 |
+
diff = self.state.history.current_tokens - self.settings.max_input_tokens
|
74 |
+
min_message_len = 2 if self.context_content is not None else 1
|
75 |
+
|
76 |
+
while diff > 0 and len(self.state.history.messages) > min_message_len:
|
77 |
+
msg = self.state.history.messages.pop(min_message_len)
|
78 |
+
self.state.history.current_tokens -= msg.metadata.tokens
|
79 |
+
diff = self.state.history.current_tokens - self.settings.max_input_tokens
|
80 |
+
|
81 |
+
def add_state_message(
|
82 |
+
self,
|
83 |
+
state: BrowserState,
|
84 |
+
actions: Optional[List[ActionModel]] = None,
|
85 |
+
result: Optional[List[ActionResult]] = None,
|
86 |
+
step_info: Optional[AgentStepInfo] = None,
|
87 |
+
use_vision=True,
|
88 |
+
) -> None:
|
89 |
+
"""Add browser state as human message"""
|
90 |
+
# otherwise add state message and result to next message (which will not stay in memory)
|
91 |
+
state_message = self.settings.agent_prompt_class(
|
92 |
+
state,
|
93 |
+
actions,
|
94 |
+
result,
|
95 |
+
include_attributes=self.settings.include_attributes,
|
96 |
+
step_info=step_info,
|
97 |
+
).get_user_message(use_vision)
|
98 |
+
self._add_message_with_tokens(state_message)
|
99 |
+
|
100 |
+
def _remove_state_message_by_index(self, remove_ind=-1) -> None:
|
101 |
+
"""Remove state message by index from history"""
|
102 |
+
i = len(self.state.history.messages) - 1
|
103 |
+
remove_cnt = 0
|
104 |
+
while i >= 0:
|
105 |
+
if isinstance(self.state.history.messages[i].message, HumanMessage):
|
106 |
+
remove_cnt += 1
|
107 |
+
if remove_cnt == abs(remove_ind):
|
108 |
+
msg = self.state.history.messages.pop(i)
|
109 |
+
self.state.history.current_tokens -= msg.metadata.tokens
|
110 |
+
break
|
111 |
+
i -= 1
|
src/agent/custom_prompts.py
ADDED
@@ -0,0 +1,125 @@
1 |
+
import pdb
|
2 |
+
from typing import List, Optional
|
3 |
+
|
4 |
+
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
|
5 |
+
from browser_use.agent.views import ActionResult, ActionModel
|
6 |
+
from browser_use.browser.views import BrowserState
|
7 |
+
from langchain_core.messages import HumanMessage, SystemMessage
|
8 |
+
from datetime import datetime
|
9 |
+
import importlib
|
10 |
+
|
11 |
+
from .custom_views import CustomAgentStepInfo
|
12 |
+
|
13 |
+
|
14 |
+
class CustomSystemPrompt(SystemPrompt):
|
15 |
+
def _load_prompt_template(self) -> None:
|
16 |
+
"""Load the prompt template from the markdown file."""
|
17 |
+
try:
|
18 |
+
# This works both in development and when installed as a package
|
19 |
+
with importlib.resources.files('src.agent').joinpath('custom_system_prompt.md').open('r') as f:
|
20 |
+
self.prompt_template = f.read()
|
21 |
+
except Exception as e:
|
22 |
+
raise RuntimeError(f'Failed to load system prompt template: {e}')
|
23 |
+
|
24 |
+
def get_system_message(self) -> SystemMessage:
|
25 |
+
"""
|
26 |
+
Get the system prompt for the agent.
|
27 |
+
|
28 |
+
Returns:
|
29 |
+
SystemMessage: Formatted system prompt
|
30 |
+
"""
|
31 |
+
prompt = self.prompt_template.format(max_actions=self.max_actions_per_step,
|
32 |
+
available_actions=self.default_action_description)
|
33 |
+
|
34 |
+
return SystemMessage(content=prompt)
|
35 |
+
|
36 |
+
|
37 |
+
class CustomAgentMessagePrompt(AgentMessagePrompt):
|
38 |
+
def __init__(
|
39 |
+
self,
|
40 |
+
state: BrowserState,
|
41 |
+
actions: Optional[List[ActionModel]] = None,
|
42 |
+
result: Optional[List[ActionResult]] = None,
|
43 |
+
include_attributes: list[str] = [],
|
44 |
+
step_info: Optional[CustomAgentStepInfo] = None,
|
45 |
+
):
|
46 |
+
super(CustomAgentMessagePrompt, self).__init__(state=state,
|
47 |
+
result=result,
|
48 |
+
include_attributes=include_attributes,
|
49 |
+
step_info=step_info
|
50 |
+
)
|
51 |
+
self.actions = actions
|
52 |
+
|
53 |
+
def get_user_message(self, use_vision: bool = True) -> HumanMessage:
|
54 |
+
if self.step_info:
|
55 |
+
step_info_description = f'Current step: {self.step_info.step_number}/{self.step_info.max_steps}\n'
|
56 |
+
else:
|
57 |
+
step_info_description = ''
|
58 |
+
|
59 |
+
time_str = datetime.now().strftime("%Y-%m-%d %H:%M")
|
60 |
+
step_info_description += f"Current date and time: {time_str}"
|
61 |
+
|
62 |
+
elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)
|
63 |
+
|
64 |
+
has_content_above = (self.state.pixels_above or 0) > 0
|
65 |
+
has_content_below = (self.state.pixels_below or 0) > 0
|
66 |
+
|
67 |
+
if elements_text != '':
|
68 |
+
if has_content_above:
|
69 |
+
elements_text = (
|
70 |
+
f'... {self.state.pixels_above} pixels above - scroll or extract content to see more ...\n{elements_text}'
|
71 |
+
)
|
72 |
+
else:
|
73 |
+
elements_text = f'[Start of page]\n{elements_text}'
|
74 |
+
if has_content_below:
|
75 |
+
elements_text = (
|
76 |
+
f'{elements_text}\n... {self.state.pixels_below} pixels below - scroll or extract content to see more ...'
|
77 |
+
)
|
78 |
+
else:
|
79 |
+
elements_text = f'{elements_text}\n[End of page]'
|
80 |
+
else:
|
81 |
+
elements_text = 'empty page'
|
82 |
+
|
83 |
+
state_description = f"""
|
84 |
+
{step_info_description}
|
85 |
+
1. Task: {self.step_info.task}.
|
86 |
+
2. Hints(Optional):
|
87 |
+
{self.step_info.add_infos}
|
88 |
+
3. Memory:
|
89 |
+
{self.step_info.memory}
|
90 |
+
4. Current url: {self.state.url}
|
91 |
+
5. Available tabs:
|
92 |
+
{self.state.tabs}
|
93 |
+
6. Interactive elements:
|
94 |
+
{elements_text}
|
95 |
+
"""
|
96 |
+
|
97 |
+
if self.actions and self.result:
|
98 |
+
state_description += "\n **Previous Actions** \n"
|
99 |
+
state_description += f'Previous step: {self.step_info.step_number - 1}/{self.step_info.max_steps} \n'
|
100 |
+
for i, result in enumerate(self.result):
|
101 |
+
action = self.actions[i]
|
102 |
+
state_description += f"Previous action {i + 1}/{len(self.result)}: {action.model_dump_json(exclude_unset=True)}\n"
|
103 |
+
if result.error:
|
104 |
+
# only use last 300 characters of error
|
105 |
+
error = result.error.split('\n')[-1]
|
106 |
+
state_description += (
|
107 |
+
f"Error of previous action {i + 1}/{len(self.result)}: ...{error}\n"
|
108 |
+
)
|
109 |
+
if result.include_in_memory:
|
110 |
+
if result.extracted_content:
|
111 |
+
state_description += f"Result of previous action {i + 1}/{len(self.result)}: {result.extracted_content}\n"
|
112 |
+
|
113 |
+
if self.state.screenshot and use_vision == True:
|
114 |
+
# Format message for vision model
|
115 |
+
return HumanMessage(
|
116 |
+
content=[
|
117 |
+
{'type': 'text', 'text': state_description},
|
118 |
+
{
|
119 |
+
'type': 'image_url',
|
120 |
+
'image_url': {'url': f'data:image/png;base64,{self.state.screenshot}'},
|
121 |
+
},
|
122 |
+
]
|
123 |
+
)
|
124 |
+
|
125 |
+
return HumanMessage(content=state_description)
|
src/agent/custom_system_prompt.md
ADDED
@@ -0,0 +1,80 @@
1 |
+
You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.
|
2 |
+
|
3 |
+
# Input Format
|
4 |
+
Task
|
5 |
+
Previous steps
|
6 |
+
Current URL
|
7 |
+
Open Tabs
|
8 |
+
Interactive Elements
|
9 |
+
[index]<type>text</type>
|
10 |
+
- index: Numeric identifier for interaction
|
11 |
+
- type: HTML element type (button, input, etc.)
|
12 |
+
- text: Element description
|
13 |
+
Example:
|
14 |
+
[33]<button>Submit Form</button>
|
15 |
+
|
16 |
+
- Only elements with numeric indexes in [] are interactive
|
17 |
+
- elements without [] provide only context
|
18 |
+
|
19 |
+
# Response Rules
|
20 |
+
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
|
21 |
+
{{
|
22 |
+
"current_state": {{
|
23 |
+
"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not.",
|
24 |
+
"important_contents": "Output important contents closely related to user\'s instruction on the current page. If there is, please output the contents. If not, please output empty string ''.",
|
25 |
+
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of evaluation_previous_goal is 'Failed', please reflect and output your reflection here.",
|
26 |
+
"next_goal": "Please generate a brief natural language description for the goal of your next actions based on your thought."
|
27 |
+
}},
|
28 |
+
"action": [
|
29 |
+
{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence
|
30 |
+
]
|
31 |
+
}}
|
32 |
+
|
33 |
+
2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {max_actions} actions per sequence.
|
34 |
+
Common action sequences:
|
35 |
+
- Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}]
|
36 |
+
- Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}]
|
37 |
+
- Actions are executed in the given order
|
38 |
+
- If the page changes after an action, the sequence is interrupted and you get the new state.
|
39 |
+
- Only provide the action sequence until an action which changes the page state significantly.
|
40 |
+
- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page
|
41 |
+
- only use multiple actions if it makes sense.
|
42 |
+
- Only choose from the available actions below.
|
43 |
+
|
44 |
+
3. ELEMENT INTERACTION:
|
45 |
+
- Only use indexes of the interactive elements
|
46 |
+
- Elements marked with "[]Non-interactive text" are non-interactive
|
47 |
+
|
48 |
+
4. NAVIGATION & ERROR HANDLING:
|
49 |
+
- If no suitable elements exist, use other functions to complete the task
|
50 |
+
- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.
|
51 |
+
- Handle popups/cookies by accepting or closing them
|
52 |
+
- Use scroll to find elements you are looking for
|
53 |
+
- If you want to research something, open a new tab instead of using the current tab
|
54 |
+
- If captcha pops up, try to solve it - else try a different approach
|
55 |
+
- If the page is not fully loaded, use wait action
|
56 |
+
|
57 |
+
5. TASK COMPLETION:
|
58 |
+
- Use the done action as the last action as soon as the ultimate task is complete
|
59 |
+
- Don't use "done" before you are done with everything the user asked for, unless you have reached the last step of max_steps.
|
60 |
+
- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completely finished, set success to true. If not everything the user asked for is completed, set success in done to false!
|
61 |
+
- If you have to do something repeatedly, for example the task says "for each", "for all", or "x times", always count inside "memory" how many times you have done it and how many remain. Don't stop until you have completed everything the task asked for. Only call done after the last step.
|
62 |
+
- Don't hallucinate actions
|
63 |
+
- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.
|
64 |
+
|
65 |
+
6. VISUAL CONTEXT:
|
66 |
+
- When an image is provided, use it to understand the page layout
|
67 |
+
- Bounding boxes with labels on their top right corner correspond to element indexes
|
68 |
+
|
69 |
+
7. Form filling:
|
70 |
+
- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.
|
71 |
+
|
72 |
+
8. Long tasks:
|
73 |
+
- Keep track of the status and subresults in the memory.
|
74 |
+
|
75 |
+
9. Extraction:
|
76 |
+
- If your task is to find information - call extract_content on the specific pages to get and store the information.
|
77 |
+
Your responses must be always JSON with the specified format.
|
78 |
+
|
79 |
+
Available Actions:
|
80 |
+
{available_actions}
|
src/agent/custom_views.py
ADDED
@@ -0,0 +1,67 @@
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import Any, Dict, List, Literal, Optional, Type
|
3 |
+
import uuid
|
4 |
+
|
5 |
+
from browser_use.agent.views import AgentOutput, AgentState, ActionResult, AgentHistoryList, MessageManagerState
|
6 |
+
from browser_use.controller.registry.views import ActionModel
|
7 |
+
from pydantic import BaseModel, ConfigDict, Field, create_model
|
8 |
+
|
9 |
+
|
10 |
+
@dataclass
|
11 |
+
class CustomAgentStepInfo:
|
12 |
+
step_number: int
|
13 |
+
max_steps: int
|
14 |
+
task: str
|
15 |
+
add_infos: str
|
16 |
+
memory: str
|
17 |
+
|
18 |
+
|
19 |
+
class CustomAgentBrain(BaseModel):
|
20 |
+
"""Current state of the agent"""
|
21 |
+
|
22 |
+
evaluation_previous_goal: str
|
23 |
+
important_contents: str
|
24 |
+
thought: str
|
25 |
+
next_goal: str
|
26 |
+
|
27 |
+
|
28 |
+
class CustomAgentOutput(AgentOutput):
|
29 |
+
"""Output model for agent
|
30 |
+
|
31 |
+
@dev note: this model is extended with custom actions in AgentService. You can also use some fields that are not in this model as provided by the linter, as long as they are registered in the DynamicActions model.
|
32 |
+
"""
|
33 |
+
|
34 |
+
current_state: CustomAgentBrain
|
35 |
+
|
36 |
+
@staticmethod
|
37 |
+
def type_with_custom_actions(
|
38 |
+
custom_actions: Type[ActionModel],
|
39 |
+
) -> Type["CustomAgentOutput"]:
|
40 |
+
"""Extend actions with custom actions"""
|
41 |
+
model_ = create_model(
|
42 |
+
"CustomAgentOutput",
|
43 |
+
__base__=CustomAgentOutput,
|
44 |
+
action=(
|
45 |
+
list[custom_actions],
|
46 |
+
Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
|
47 |
+
), # Properly annotated field with no default
|
48 |
+
__module__=CustomAgentOutput.__module__,
|
49 |
+
)
|
50 |
+
model_.__doc__ = 'AgentOutput model with custom actions'
|
51 |
+
return model_
|
52 |
+
|
53 |
+
|
54 |
+
class CustomAgentState(BaseModel):
|
55 |
+
agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
56 |
+
n_steps: int = 1
|
57 |
+
consecutive_failures: int = 0
|
58 |
+
last_result: Optional[List['ActionResult']] = None
|
59 |
+
history: AgentHistoryList = Field(default_factory=lambda: AgentHistoryList(history=[]))
|
60 |
+
last_plan: Optional[str] = None
|
61 |
+
paused: bool = False
|
62 |
+
stopped: bool = False
|
63 |
+
|
64 |
+
message_manager_state: MessageManagerState = Field(default_factory=MessageManagerState)
|
65 |
+
|
66 |
+
last_action: Optional[List['ActionModel']] = None
|
67 |
+
extracted_content: str = ''
|
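A minimal sketch (not part of the upload) of how the JSON `current_state` block required by the custom system prompt maps onto `CustomAgentBrain`; the sample payload below is hypothetical.

```python
# Hypothetical payload, in the shape the custom system prompt asks the LLM to emit.
from src.agent.custom_views import CustomAgentBrain

sample_state = {
    "evaluation_previous_goal": "Success - the search results page loaded as expected",
    "important_contents": "",
    "thought": "The query was submitted; the next step is to open the top result.",
    "next_goal": "Open the most relevant search result",
}

brain = CustomAgentBrain(**sample_state)  # pydantic validates the four required string fields
print(brain.next_goal)
```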
src/browser/__init__.py
ADDED
File without changes
|
src/browser/custom_browser.py
ADDED
@@ -0,0 +1,28 @@
1 |
+
import asyncio
|
2 |
+
import pdb
|
3 |
+
|
4 |
+
from playwright.async_api import Browser as PlaywrightBrowser
|
5 |
+
from playwright.async_api import (
|
6 |
+
BrowserContext as PlaywrightBrowserContext,
|
7 |
+
)
|
8 |
+
from playwright.async_api import (
|
9 |
+
Playwright,
|
10 |
+
async_playwright,
|
11 |
+
)
|
12 |
+
from browser_use.browser.browser import Browser
|
13 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
14 |
+
from playwright.async_api import BrowserContext as PlaywrightBrowserContext
|
15 |
+
import logging
|
16 |
+
|
17 |
+
from .custom_context import CustomBrowserContext
|
18 |
+
|
19 |
+
logger = logging.getLogger(__name__)
|
20 |
+
|
21 |
+
|
22 |
+
class CustomBrowser(Browser):
|
23 |
+
|
24 |
+
async def new_context(
|
25 |
+
self,
|
26 |
+
config: BrowserContextConfig = BrowserContextConfig()
|
27 |
+
) -> CustomBrowserContext:
|
28 |
+
return CustomBrowserContext(config=config, browser=self)
|
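A hedged usage sketch, mirroring how deep_research.py wires these classes together; the headless flag and the placeholder agent step are illustrative only.

```python
import asyncio

from browser_use.browser.browser import BrowserConfig
from browser_use.browser.context import BrowserContextConfig
from src.browser.custom_browser import CustomBrowser

async def main():
    # Launch the custom browser and open a custom context, as deep_research.py does.
    browser = CustomBrowser(config=BrowserConfig(headless=True))
    context = await browser.new_context(config=BrowserContextConfig())
    try:
        pass  # hand browser/context to a CustomAgent here
    finally:
        await context.close()
        await browser.close()

asyncio.run(main())
```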
src/browser/custom_context.py
ADDED
@@ -0,0 +1,19 @@
1 |
+
import json
|
2 |
+
import logging
|
3 |
+
import os
|
4 |
+
|
5 |
+
from browser_use.browser.browser import Browser
|
6 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
7 |
+
from playwright.async_api import Browser as PlaywrightBrowser
|
8 |
+
from playwright.async_api import BrowserContext as PlaywrightBrowserContext
|
9 |
+
|
10 |
+
logger = logging.getLogger(__name__)
|
11 |
+
|
12 |
+
|
13 |
+
class CustomBrowserContext(BrowserContext):
|
14 |
+
def __init__(
|
15 |
+
self,
|
16 |
+
browser: "Browser",
|
17 |
+
config: BrowserContextConfig = BrowserContextConfig()
|
18 |
+
):
|
19 |
+
super(CustomBrowserContext, self).__init__(browser=browser, config=config)
|
src/controller/__init__.py
ADDED
File without changes
|
src/controller/custom_controller.py
ADDED
@@ -0,0 +1,49 @@
1 |
+
import pdb
|
2 |
+
|
3 |
+
import pyperclip
|
4 |
+
from typing import Optional, Type
|
5 |
+
from pydantic import BaseModel
|
6 |
+
from browser_use.agent.views import ActionResult
|
7 |
+
from browser_use.browser.context import BrowserContext
|
8 |
+
from browser_use.controller.service import Controller, DoneAction
|
9 |
+
from main_content_extractor import MainContentExtractor
|
10 |
+
from browser_use.controller.views import (
|
11 |
+
ClickElementAction,
|
12 |
+
DoneAction,
|
13 |
+
ExtractPageContentAction,
|
14 |
+
GoToUrlAction,
|
15 |
+
InputTextAction,
|
16 |
+
OpenTabAction,
|
17 |
+
ScrollAction,
|
18 |
+
SearchGoogleAction,
|
19 |
+
SendKeysAction,
|
20 |
+
SwitchTabAction,
|
21 |
+
)
|
22 |
+
import logging
|
23 |
+
|
24 |
+
logger = logging.getLogger(__name__)
|
25 |
+
|
26 |
+
|
27 |
+
class CustomController(Controller):
|
28 |
+
def __init__(self, exclude_actions: list[str] = [],
|
29 |
+
output_model: Optional[Type[BaseModel]] = None
|
30 |
+
):
|
31 |
+
super().__init__(exclude_actions=exclude_actions, output_model=output_model)
|
32 |
+
self._register_custom_actions()
|
33 |
+
|
34 |
+
def _register_custom_actions(self):
|
35 |
+
"""Register all custom browser actions"""
|
36 |
+
|
37 |
+
@self.registry.action("Copy text to clipboard")
|
38 |
+
def copy_to_clipboard(text: str):
|
39 |
+
pyperclip.copy(text)
|
40 |
+
return ActionResult(extracted_content=text)
|
41 |
+
|
42 |
+
@self.registry.action("Paste text from clipboard")
|
43 |
+
async def paste_from_clipboard(browser: BrowserContext):
|
44 |
+
text = pyperclip.paste()
|
45 |
+
# send text to browser
|
46 |
+
page = await browser.get_current_page()
|
47 |
+
await page.keyboard.type(text)
|
48 |
+
|
49 |
+
return ActionResult(extracted_content=text)
|
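A sketch of how an additional action could be registered on `CustomController`, following the same decorator pattern as the clipboard actions above; the action name and body are illustrative only, not part of the upload.

```python
from browser_use.agent.views import ActionResult
from src.controller.custom_controller import CustomController

controller = CustomController()

# Hypothetical extra action, registered the same way as copy/paste above.
@controller.registry.action("Log a note for the user")
def log_note(text: str):
    print(f"NOTE: {text}")
    return ActionResult(extracted_content=text)
```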
src/utils/__init__.py
ADDED
File without changes
|
src/utils/agent_state.py
ADDED
@@ -0,0 +1,31 @@
1 |
+
import asyncio
|
2 |
+
|
3 |
+
|
4 |
+
class AgentState:
|
5 |
+
_instance = None
|
6 |
+
|
7 |
+
def __init__(self):
|
8 |
+
if not hasattr(self, '_stop_requested'):
|
9 |
+
self._stop_requested = asyncio.Event()
|
10 |
+
self.last_valid_state = None # store the last valid browser state
|
11 |
+
|
12 |
+
def __new__(cls):
|
13 |
+
if cls._instance is None:
|
14 |
+
cls._instance = super(AgentState, cls).__new__(cls)
|
15 |
+
return cls._instance
|
16 |
+
|
17 |
+
def request_stop(self):
|
18 |
+
self._stop_requested.set()
|
19 |
+
|
20 |
+
def clear_stop(self):
|
21 |
+
self._stop_requested.clear()
|
22 |
+
self.last_valid_state = None
|
23 |
+
|
24 |
+
def is_stop_requested(self):
|
25 |
+
return self._stop_requested.is_set()
|
26 |
+
|
27 |
+
def set_last_valid_state(self, state):
|
28 |
+
self.last_valid_state = state
|
29 |
+
|
30 |
+
def get_last_valid_state(self):
|
31 |
+
return self.last_valid_state
|
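A minimal sketch of the singleton behaviour: every `AgentState()` call returns the same instance, so a UI handler can request a stop that a running agent observes. Illustrative only.

```python
from src.utils.agent_state import AgentState

ui_state = AgentState()
worker_state = AgentState()              # same object as ui_state (singleton)

ui_state.request_stop()
assert worker_state.is_stop_requested()  # True: the stop flag is shared

worker_state.clear_stop()                # also resets last_valid_state to None
```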
src/utils/deep_research.py
ADDED
@@ -0,0 +1,387 @@
1 |
+
import pdb
|
2 |
+
|
3 |
+
from dotenv import load_dotenv
|
4 |
+
|
5 |
+
load_dotenv()
|
6 |
+
import asyncio
|
7 |
+
import os
|
8 |
+
import sys
|
9 |
+
import logging
|
10 |
+
from pprint import pprint
|
11 |
+
from uuid import uuid4
|
12 |
+
from src.utils import utils
|
13 |
+
from src.agent.custom_agent import CustomAgent
|
14 |
+
import json
|
15 |
+
import re
|
16 |
+
from browser_use.agent.service import Agent
|
17 |
+
from browser_use.browser.browser import BrowserConfig, Browser
|
18 |
+
from browser_use.agent.views import ActionResult
|
19 |
+
from browser_use.browser.context import BrowserContext
|
20 |
+
from browser_use.controller.service import Controller, DoneAction
|
21 |
+
from main_content_extractor import MainContentExtractor
|
22 |
+
from langchain_core.messages import (
|
23 |
+
AIMessage,
|
24 |
+
BaseMessage,
|
25 |
+
HumanMessage,
|
26 |
+
ToolMessage,
|
27 |
+
SystemMessage
|
28 |
+
)
|
29 |
+
from json_repair import repair_json
|
30 |
+
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
|
31 |
+
from src.controller.custom_controller import CustomController
|
32 |
+
from src.browser.custom_browser import CustomBrowser
|
33 |
+
from src.browser.custom_context import BrowserContextConfig, BrowserContext
|
34 |
+
from browser_use.browser.context import (
|
35 |
+
BrowserContextConfig,
|
36 |
+
BrowserContextWindowSize,
|
37 |
+
)
|
38 |
+
|
39 |
+
logger = logging.getLogger(__name__)
|
40 |
+
|
41 |
+
|
42 |
+
async def deep_research(task, llm, agent_state=None, **kwargs):
|
43 |
+
task_id = str(uuid4())
|
44 |
+
save_dir = kwargs.get("save_dir", os.path.join(f"./tmp/deep_research/{task_id}"))
|
45 |
+
logger.info(f"Save Deep Research at: {save_dir}")
|
46 |
+
os.makedirs(save_dir, exist_ok=True)
|
47 |
+
|
48 |
+
# max query num per iteration
|
49 |
+
max_query_num = kwargs.get("max_query_num", 3)
|
50 |
+
|
51 |
+
use_own_browser = kwargs.get("use_own_browser", False)
|
52 |
+
extra_chromium_args = []
|
53 |
+
|
54 |
+
if use_own_browser:
|
55 |
+
cdp_url = os.getenv("CHROME_CDP", kwargs.get("chrome_cdp", None))
|
56 |
+
# TODO: if using own browser, max query num must be 1 per iteration; how to solve this?
|
57 |
+
max_query_num = 1
|
58 |
+
chrome_path = os.getenv("CHROME_PATH", None)
|
59 |
+
if chrome_path == "":
|
60 |
+
chrome_path = None
|
61 |
+
chrome_user_data = os.getenv("CHROME_USER_DATA", None)
|
62 |
+
if chrome_user_data:
|
63 |
+
extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
|
64 |
+
|
65 |
+
browser = CustomBrowser(
|
66 |
+
config=BrowserConfig(
|
67 |
+
headless=kwargs.get("headless", False),
|
68 |
+
cdp_url=cdp_url,
|
69 |
+
disable_security=kwargs.get("disable_security", True),
|
70 |
+
chrome_instance_path=chrome_path,
|
71 |
+
extra_chromium_args=extra_chromium_args,
|
72 |
+
)
|
73 |
+
)
|
74 |
+
browser_context = await browser.new_context()
|
75 |
+
else:
|
76 |
+
browser = None
|
77 |
+
browser_context = None
|
78 |
+
|
79 |
+
controller = CustomController()
|
80 |
+
|
81 |
+
@controller.registry.action(
|
82 |
+
'Extract page content to get the pure markdown.',
|
83 |
+
)
|
84 |
+
async def extract_content(browser: BrowserContext):
|
85 |
+
page = await browser.get_current_page()
|
86 |
+
# use jina reader
|
87 |
+
url = page.url
|
88 |
+
|
89 |
+
jina_url = f"https://r.jina.ai/{url}"
|
90 |
+
await page.goto(jina_url)
|
91 |
+
output_format = 'markdown'
|
92 |
+
content = MainContentExtractor.extract( # type: ignore
|
93 |
+
html=await page.content(),
|
94 |
+
output_format=output_format,
|
95 |
+
)
|
96 |
+
# go back to the original url
|
97 |
+
await page.go_back()
|
98 |
+
msg = f'Extracted page content:\n{content}\n'
|
99 |
+
logger.info(msg)
|
100 |
+
return ActionResult(extracted_content=msg)
|
101 |
+
|
102 |
+
search_system_prompt = f"""
|
103 |
+
You are a **Deep Researcher**, an AI agent specializing in in-depth information gathering and research using a web browser with **automated execution capabilities**. Your expertise lies in formulating comprehensive research plans and executing them meticulously to fulfill complex user requests. You will analyze user instructions, devise a detailed research plan, and determine the necessary search queries to gather the required information.
|
104 |
+
|
105 |
+
**Your Task:**
|
106 |
+
|
107 |
+
Given a user's research topic, you will:
|
108 |
+
|
109 |
+
1. **Develop a Research Plan:** Outline the key aspects and subtopics that need to be investigated to thoroughly address the user's request. This plan should be a high-level overview of the research direction.
|
110 |
+
2. **Generate Search Queries:** Based on your research plan, generate a list of specific search queries to be executed in a web browser. These queries should be designed to efficiently gather relevant information for each aspect of your plan.
|
111 |
+
|
112 |
+
**Output Format:**
|
113 |
+
|
114 |
+
Your output will be a JSON object with the following structure:
|
115 |
+
|
116 |
+
```json
|
117 |
+
{{
|
118 |
+
"plan": "A concise, high-level research plan outlining the key areas to investigate.",
|
119 |
+
"queries": [
|
120 |
+
"search query 1",
|
121 |
+
"search query 2",
|
122 |
+
//... up to a maximum of {max_query_num} search queries
|
123 |
+
]
|
124 |
+
}}
|
125 |
+
```
|
126 |
+
|
127 |
+
**Important:**
|
128 |
+
|
129 |
+
* Limit your output to a **maximum of {max_query_num}** search queries.
|
130 |
+
* Craft the search queries so that they help the automated agent find the needed information. Consider what keywords are most likely to lead to useful results.
|
131 |
+
* If you have gathered all the information you need and no further search queries are required, output queries as an empty list: `[]`
|
132 |
+
* Make sure the output search queries are different from the history queries.
|
133 |
+
|
134 |
+
**Inputs:**
|
135 |
+
|
136 |
+
1. **User Instruction:** The original instruction given by the user.
|
137 |
+
2. **Previous Queries:** History Queries.
|
138 |
+
3. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
|
139 |
+
"""
|
140 |
+
search_messages = [SystemMessage(content=search_system_prompt)]
|
141 |
+
|
142 |
+
record_system_prompt = """
|
143 |
+
You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a JSON formatted list, where each element represents a piece of extracted information and follows the structure: `{"url": "source_url", "title": "source_title", "summary_content": "concise_summary", "thinking": "reasoning"}`.
|
144 |
+
|
145 |
+
**Important Considerations:**
|
146 |
+
|
147 |
+
1. **Minimize Information Loss:** While concise, prioritize retaining important details and nuances from the sources. Aim for a summary that captures the essence of the information without over-simplification. **Crucially, ensure to preserve key data and figures within the `summary_content`. This is essential for later stages, such as generating tables and reports.**
|
148 |
+
|
149 |
+
2. **Avoid Redundancy:** Do not record information that is already present in the Previous Recorded Information. Check for semantic similarity, not just exact matches. However, if the same information is expressed differently in a new source and this variation adds valuable context or clarity, it should be included.
|
150 |
+
|
151 |
+
3. **Source Information:** Extract and include the source title and URL for each piece of information summarized. This is crucial for verification and context. **The Current Search Results are provided in a specific format, where each item starts with "Title:", followed by the title, then "URL Source:", followed by the URL, and finally "Markdown Content:", followed by the content. Please extract the title and URL from this structure.** If a piece of information cannot be attributed to a specific source from the provided search results, use `"url": "unknown"` and `"title": "unknown"`.
|
152 |
+
|
153 |
+
4. **Thinking and Report Structure:** For each extracted piece of information, add a `"thinking"` key. This field should contain your assessment of how this information could be used in a report, which section it might belong to (e.g., introduction, background, analysis, conclusion, specific subtopics), and any other relevant thoughts about its significance or connection to other information.
|
154 |
+
|
155 |
+
**Output Format:**
|
156 |
+
|
157 |
+
Provide your output as a JSON formatted list. Each item in the list must adhere to the following format:
|
158 |
+
|
159 |
+
```json
|
160 |
+
[
|
161 |
+
{
|
162 |
+
"url": "source_url_1",
|
163 |
+
"title": "source_title_1",
|
164 |
+
"summary_content": "Concise summary of content. Remember to include key data and figures here.",
|
165 |
+
"thinking": "This could be used in the introduction to set the context. It also relates to the section on the history of the topic."
|
166 |
+
},
|
167 |
+
// ... more entries
|
168 |
+
{
|
169 |
+
"url": "unknown",
|
170 |
+
"title": "unknown",
|
171 |
+
"summary_content": "concise_summary_of_content_without_clear_source",
|
172 |
+
"thinking": "This might be useful background information, but I need to verify its accuracy. Could be used in the methodology section to explain how data was collected."
|
173 |
+
}
|
174 |
+
]
|
175 |
+
```
|
176 |
+
|
177 |
+
**Inputs:**
|
178 |
+
|
179 |
+
1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
|
180 |
+
2. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string.
|
181 |
+
3. **Current Search Plan:** Research plan for current search.
|
182 |
+
4. **Current Search Query:** The current search query.
|
183 |
+
5. **Current Search Results:** Textual data gathered from the most recent search query.
|
184 |
+
"""
|
185 |
+
record_messages = [SystemMessage(content=record_system_prompt)]
|
186 |
+
|
187 |
+
search_iteration = 0
|
188 |
+
max_search_iterations = kwargs.get("max_search_iterations", 10) # Limit search iterations to prevent infinite loop
|
189 |
+
use_vision = kwargs.get("use_vision", False)
|
190 |
+
|
191 |
+
history_query = []
|
192 |
+
history_infos = []
|
193 |
+
try:
|
194 |
+
while search_iteration < max_search_iterations:
|
195 |
+
search_iteration += 1
|
196 |
+
logger.info(f"Start {search_iteration}th Search...")
|
197 |
+
history_query_ = json.dumps(history_query, indent=4)
|
198 |
+
history_infos_ = json.dumps(history_infos, indent=4)
|
199 |
+
query_prompt = f"This is search {search_iteration} of {max_search_iterations} maximum searches allowed.\n User Instruction:{task} \n Previous Queries:\n {history_query_} \n Previous Search Results:\n {history_infos_}\n"
|
200 |
+
search_messages.append(HumanMessage(content=query_prompt))
|
201 |
+
ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-1:])
|
202 |
+
search_messages.append(ai_query_msg)
|
203 |
+
if hasattr(ai_query_msg, "reasoning_content"):
|
204 |
+
logger.info("🤯 Start Search Deep Thinking: ")
|
205 |
+
logger.info(ai_query_msg.reasoning_content)
|
206 |
+
logger.info("🤯 End Search Deep Thinking")
|
207 |
+
ai_query_content = ai_query_msg.content.replace("```json", "").replace("```", "")
|
208 |
+
ai_query_content = repair_json(ai_query_content)
|
209 |
+
ai_query_content = json.loads(ai_query_content)
|
210 |
+
query_plan = ai_query_content["plan"]
|
211 |
+
logger.info(f"Current Iteration {search_iteration} Planing:")
|
212 |
+
logger.info(query_plan)
|
213 |
+
query_tasks = ai_query_content["queries"]
|
214 |
+
if not query_tasks:
|
215 |
+
break
|
216 |
+
else:
|
217 |
+
query_tasks = query_tasks[:max_query_num]
|
218 |
+
history_query.extend(query_tasks)
|
219 |
+
logger.info("Query tasks:")
|
220 |
+
logger.info(query_tasks)
|
221 |
+
|
222 |
+
# 2. Perform Web Search and Auto exec
|
223 |
+
# Parallel BU agents
|
224 |
+
add_infos = "1. Please click on the most relevant link to get information and go deeper, instead of just staying on the search page. \n" \
|
225 |
+
"2. When opening a PDF file, please remember to extract the content using extract_content instead of simply opening it for the user to view.\n"
|
226 |
+
if use_own_browser:
|
227 |
+
agent = CustomAgent(
|
228 |
+
task=query_tasks[0],
|
229 |
+
llm=llm,
|
230 |
+
add_infos=add_infos,
|
231 |
+
browser=browser,
|
232 |
+
browser_context=browser_context,
|
233 |
+
use_vision=use_vision,
|
234 |
+
system_prompt_class=CustomSystemPrompt,
|
235 |
+
agent_prompt_class=CustomAgentMessagePrompt,
|
236 |
+
max_actions_per_step=5,
|
237 |
+
controller=controller
|
238 |
+
)
|
239 |
+
agent_result = await agent.run(max_steps=kwargs.get("max_steps", 10))
|
240 |
+
query_results = [agent_result]
|
241 |
+
# Manually close all tabs
|
242 |
+
session = await browser_context.get_session()
|
243 |
+
pages = session.context.pages
|
244 |
+
await browser_context.create_new_tab()
|
245 |
+
for page_id, page in enumerate(pages):
|
246 |
+
await page.close()
|
247 |
+
|
248 |
+
else:
|
249 |
+
agents = [CustomAgent(
|
250 |
+
task=task,
|
251 |
+
llm=llm,
|
252 |
+
add_infos=add_infos,
|
253 |
+
browser=browser,
|
254 |
+
browser_context=browser_context,
|
255 |
+
use_vision=use_vision,
|
256 |
+
system_prompt_class=CustomSystemPrompt,
|
257 |
+
agent_prompt_class=CustomAgentMessagePrompt,
|
258 |
+
max_actions_per_step=5,
|
259 |
+
controller=controller,
|
260 |
+
) for task in query_tasks]
|
261 |
+
query_results = await asyncio.gather(
|
262 |
+
*[agent.run(max_steps=kwargs.get("max_steps", 10)) for agent in agents])
|
263 |
+
|
264 |
+
if agent_state and agent_state.is_stop_requested():
|
265 |
+
# Stop
|
266 |
+
break
|
267 |
+
# 3. Summarize Search Result
|
268 |
+
query_result_dir = os.path.join(save_dir, "query_results")
|
269 |
+
os.makedirs(query_result_dir, exist_ok=True)
|
270 |
+
for i in range(len(query_tasks)):
|
271 |
+
query_result = query_results[i].final_result()
|
272 |
+
if not query_result:
|
273 |
+
continue
|
274 |
+
query_save_path = os.path.join(query_result_dir, f"{search_iteration}-{i}.md")
|
275 |
+
logger.info(f"save query: {query_tasks[i]} at {querr_save_path}")
|
276 |
+
with open(query_save_path, "w", encoding="utf-8") as fw:
|
277 |
+
fw.write(f"Query: {query_tasks[i]}\n")
|
278 |
+
fw.write(query_result)
|
279 |
+
# split query result in case the content is too long
|
280 |
+
query_results_split = query_result.split("Extracted page content:")
|
281 |
+
for qi, query_result_ in enumerate(query_results_split):
|
282 |
+
if not query_result_:
|
283 |
+
continue
|
284 |
+
else:
|
285 |
+
# TODO: limit content length: 128k tokens, ~3 chars per token
|
286 |
+
query_result_ = query_result_[:128000 * 3]
|
287 |
+
history_infos_ = json.dumps(history_infos, indent=4)
|
288 |
+
record_prompt = f"User Instruction:{task}. \nPrevious Recorded Information:\n {history_infos_}\n Current Search Iteration: {search_iteration}\n Current Search Plan:\n{query_plan}\n Current Search Query:\n {query_tasks[i]}\n Current Search Results: {query_result_}\n "
|
289 |
+
record_messages.append(HumanMessage(content=record_prompt))
|
290 |
+
ai_record_msg = llm.invoke(record_messages[:1] + record_messages[-1:])
|
291 |
+
record_messages.append(ai_record_msg)
|
292 |
+
if hasattr(ai_record_msg, "reasoning_content"):
|
293 |
+
logger.info("🤯 Start Record Deep Thinking: ")
|
294 |
+
logger.info(ai_record_msg.reasoning_content)
|
295 |
+
logger.info("🤯 End Record Deep Thinking")
|
296 |
+
record_content = ai_record_msg.content
|
297 |
+
record_content = repair_json(record_content)
|
298 |
+
new_record_infos = json.loads(record_content)
|
299 |
+
history_infos.extend(new_record_infos)
|
300 |
+
if agent_state and agent_state.is_stop_requested():
|
301 |
+
# Stop
|
302 |
+
break
|
303 |
+
|
304 |
+
logger.info("\nFinish Searching, Start Generating Report...")
|
305 |
+
|
306 |
+
# 5. Report Generation in Markdown (or JSON if you prefer)
|
307 |
+
return await generate_final_report(task, history_infos, save_dir, llm)
|
308 |
+
|
309 |
+
except Exception as e:
|
310 |
+
logger.error(f"Deep research Error: {e}")
|
311 |
+
return await generate_final_report(task, history_infos, save_dir, llm, str(e))
|
312 |
+
finally:
|
313 |
+
if browser:
|
314 |
+
await browser.close()
|
315 |
+
if browser_context:
|
316 |
+
await browser_context.close()
|
317 |
+
logger.info("Browser closed.")
|
318 |
+
|
319 |
+
|
320 |
+
async def generate_final_report(task, history_infos, save_dir, llm, error_msg=None):
|
321 |
+
"""Generate report from collected information with error handling"""
|
322 |
+
try:
|
323 |
+
logger.info("\nAttempting to generate final report from collected data...")
|
324 |
+
|
325 |
+
writer_system_prompt = """
|
326 |
+
You are a **Deep Researcher** and a professional report writer tasked with creating polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. You will write the report using Markdown format, ensuring it is both informative and visually appealing.
|
327 |
+
|
328 |
+
**Specific Instructions:**
|
329 |
+
|
330 |
+
* **Structure for Impact:** The report must have a clear, logical, and impactful structure. Begin with a compelling introduction that immediately grabs the reader's attention. Develop well-structured body paragraphs that flow smoothly and logically, and conclude with a concise and memorable conclusion that summarizes key takeaways and leaves a lasting impression.
|
331 |
+
* **Engaging and Vivid Language:** Employ precise, vivid, and descriptive language to make the report captivating and enjoyable to read. Use stylistic techniques to enhance engagement. Tailor your tone, vocabulary, and writing style to perfectly suit the subject matter and the intended audience to maximize impact and readability.
|
332 |
+
* **Accuracy, Credibility, and Citations:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. **Cite sources exclusively using bracketed sequential numbers within the text (e.g., [1], [2], etc.). If no references are used, omit citations entirely.** These numbers must correspond to a numbered list of references at the end of the report.
|
333 |
+
* **Publication-Ready Formatting:** Adhere strictly to Markdown formatting for excellent readability and a clean, highly professional visual appearance. Pay close attention to formatting details like headings, lists, emphasis, and spacing to optimize the visual presentation and reader experience. The report should be ready for immediate publication upon completion, requiring minimal to no further editing for style or format.
|
334 |
+
* **Conciseness and Clarity (Unless Specified Otherwise):** When the user does not provide a specific length, prioritize concise and to-the-point writing, maximizing information density while maintaining clarity.
|
335 |
+
* **Data-Driven Comparisons with Tables:** **When appropriate and beneficial for enhancing clarity and impact, present data comparisons in well-structured Markdown tables. This is especially encouraged when dealing with numerical data or when a visual comparison can significantly improve the reader's understanding.**
|
336 |
+
* **Length Adherence:** When the user specifies a length constraint, meticulously stay within reasonable bounds of that specification, ensuring the content is appropriately scaled without sacrificing quality or completeness.
|
337 |
+
* **Comprehensive Instruction Following:** Pay meticulous attention to all details and nuances provided in the user instructions. Strive to fulfill every aspect of the user's request with the highest degree of accuracy and attention to detail, creating a report that not only meets but exceeds expectations for quality and professionalism.
|
338 |
+
* **Reference List Formatting:** The reference list at the end must be formatted as follows:
|
339 |
+
`[1] Title (URL, if available)`
|
340 |
+
**Each reference must be separated by a blank line to ensure proper spacing.** For example:
|
341 |
+
|
342 |
+
```
|
343 |
+
[1] Title 1 (URL1, if available)
|
344 |
+
|
345 |
+
[2] Title 2 (URL2, if available)
|
346 |
+
```
|
347 |
+
**Furthermore, ensure that the reference list is free of duplicates. Each unique source should be listed only once, regardless of how many times it is cited in the text.**
|
348 |
+
* **ABSOLUTE FINAL OUTPUT RESTRICTION:** **Your output must contain ONLY the finished, publication-ready Markdown report. Do not include ANY extraneous text, phrases, preambles, meta-commentary, or markdown code indicators (e.g., "```markdown```"). The report should begin directly with the title and introductory paragraph, and end directly after the conclusion and the reference list (if applicable).** **Your response will be deemed a failure if this instruction is not followed precisely.**
|
349 |
+
|
350 |
+
**Inputs:**
|
351 |
+
|
352 |
+
1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
|
353 |
+
2. **Search Information:** Information gathered from the search queries.
|
354 |
+
"""
|
355 |
+
|
356 |
+
history_infos_ = json.dumps(history_infos, indent=4)
|
357 |
+
record_json_path = os.path.join(save_dir, "record_infos.json")
|
358 |
+
logger.info(f"save All recorded information at {record_json_path}")
|
359 |
+
with open(record_json_path, "w") as fw:
|
360 |
+
json.dump(history_infos, fw, indent=4)
|
361 |
+
report_prompt = f"User Instruction:{task} \n Search Information:\n {history_infos_}"
|
362 |
+
report_messages = [SystemMessage(content=writer_system_prompt),
|
363 |
+
HumanMessage(content=report_prompt)] # New context for report generation
|
364 |
+
ai_report_msg = llm.invoke(report_messages)
|
365 |
+
if hasattr(ai_report_msg, "reasoning_content"):
|
366 |
+
logger.info("🤯 Start Report Deep Thinking: ")
|
367 |
+
logger.info(ai_report_msg.reasoning_content)
|
368 |
+
logger.info("🤯 End Report Deep Thinking")
|
369 |
+
report_content = ai_report_msg.content
|
370 |
+
report_content = re.sub(r"^```\s*markdown\s*|^\s*```|```\s*$", "", report_content, flags=re.MULTILINE)
|
371 |
+
report_content = report_content.strip()
|
372 |
+
|
373 |
+
# Add error notification to the report
|
374 |
+
if error_msg:
|
375 |
+
report_content = f"## ⚠️ Research Incomplete - Partial Results\n" \
|
376 |
+
f"**The research process was interrupted by an error:** {error_msg}\n\n" \
|
377 |
+
f"{report_content}"
|
378 |
+
|
379 |
+
report_file_path = os.path.join(save_dir, "final_report.md")
|
380 |
+
with open(report_file_path, "w", encoding="utf-8") as f:
|
381 |
+
f.write(report_content)
|
382 |
+
logger.info(f"Save Report at: {report_file_path}")
|
383 |
+
return report_content, report_file_path
|
384 |
+
|
385 |
+
except Exception as report_error:
|
386 |
+
logger.error(f"Failed to generate partial report: {report_error}")
|
387 |
+
return f"Error generating report: {str(report_error)}", None
|
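A hedged sketch of driving `deep_research` from a script; the provider, model name, and keyword values are placeholders rather than defaults defined by this repository, and an `OPENAI_API_KEY` is assumed to be set in the environment.

```python
import asyncio

from src.utils import utils
from src.utils.deep_research import deep_research

async def main():
    # Placeholder provider/model; requires OPENAI_API_KEY in the environment.
    llm = utils.get_llm_model(provider="openai", model_name="gpt-4o", temperature=0.0)
    report_md, report_path = await deep_research(
        "Impact of browser agents on QA automation",  # example research task
        llm,
        max_search_iterations=3,
        max_query_num=2,
        headless=True,
    )
    print(f"Report saved to: {report_path}")

asyncio.run(main())
```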
src/utils/llm.py
ADDED
@@ -0,0 +1,138 @@
1 |
+
from openai import OpenAI
|
2 |
+
import pdb
|
3 |
+
from langchain_openai import ChatOpenAI
|
4 |
+
from langchain_core.globals import get_llm_cache
|
5 |
+
from langchain_core.language_models.base import (
|
6 |
+
BaseLanguageModel,
|
7 |
+
LangSmithParams,
|
8 |
+
LanguageModelInput,
|
9 |
+
)
|
10 |
+
from langchain_core.load import dumpd, dumps
|
11 |
+
from langchain_core.messages import (
|
12 |
+
AIMessage,
|
13 |
+
SystemMessage,
|
14 |
+
AnyMessage,
|
15 |
+
BaseMessage,
|
16 |
+
BaseMessageChunk,
|
17 |
+
HumanMessage,
|
18 |
+
convert_to_messages,
|
19 |
+
message_chunk_to_message,
|
20 |
+
)
|
21 |
+
from langchain_core.outputs import (
|
22 |
+
ChatGeneration,
|
23 |
+
ChatGenerationChunk,
|
24 |
+
ChatResult,
|
25 |
+
LLMResult,
|
26 |
+
RunInfo,
|
27 |
+
)
|
28 |
+
from langchain_ollama import ChatOllama
|
29 |
+
from langchain_core.output_parsers.base import OutputParserLike
|
30 |
+
from langchain_core.runnables import Runnable, RunnableConfig
|
31 |
+
from langchain_core.tools import BaseTool
|
32 |
+
|
33 |
+
from typing import (
|
34 |
+
TYPE_CHECKING,
|
35 |
+
Any,
|
36 |
+
Callable,
|
37 |
+
Literal,
|
38 |
+
Optional,
|
39 |
+
Union,
|
40 |
+
cast, List,
|
41 |
+
)
|
42 |
+
|
43 |
+
|
44 |
+
class DeepSeekR1ChatOpenAI(ChatOpenAI):
|
45 |
+
|
46 |
+
def __init__(self, *args: Any, **kwargs: Any) -> None:
|
47 |
+
super().__init__(*args, **kwargs)
|
48 |
+
self.client = OpenAI(
|
49 |
+
base_url=kwargs.get("base_url"),
|
50 |
+
api_key=kwargs.get("api_key")
|
51 |
+
)
|
52 |
+
|
53 |
+
async def ainvoke(
|
54 |
+
self,
|
55 |
+
input: LanguageModelInput,
|
56 |
+
config: Optional[RunnableConfig] = None,
|
57 |
+
*,
|
58 |
+
stop: Optional[list[str]] = None,
|
59 |
+
**kwargs: Any,
|
60 |
+
) -> AIMessage:
|
61 |
+
message_history = []
|
62 |
+
for input_ in input:
|
63 |
+
if isinstance(input_, SystemMessage):
|
64 |
+
message_history.append({"role": "system", "content": input_.content})
|
65 |
+
elif isinstance(input_, AIMessage):
|
66 |
+
message_history.append({"role": "assistant", "content": input_.content})
|
67 |
+
else:
|
68 |
+
message_history.append({"role": "user", "content": input_.content})
|
69 |
+
|
70 |
+
response = self.client.chat.completions.create(
|
71 |
+
model=self.model_name,
|
72 |
+
messages=message_history
|
73 |
+
)
|
74 |
+
|
75 |
+
reasoning_content = response.choices[0].message.reasoning_content
|
76 |
+
content = response.choices[0].message.content
|
77 |
+
return AIMessage(content=content, reasoning_content=reasoning_content)
|
78 |
+
|
79 |
+
def invoke(
|
80 |
+
self,
|
81 |
+
input: LanguageModelInput,
|
82 |
+
config: Optional[RunnableConfig] = None,
|
83 |
+
*,
|
84 |
+
stop: Optional[list[str]] = None,
|
85 |
+
**kwargs: Any,
|
86 |
+
) -> AIMessage:
|
87 |
+
message_history = []
|
88 |
+
for input_ in input:
|
89 |
+
if isinstance(input_, SystemMessage):
|
90 |
+
message_history.append({"role": "system", "content": input_.content})
|
91 |
+
elif isinstance(input_, AIMessage):
|
92 |
+
message_history.append({"role": "assistant", "content": input_.content})
|
93 |
+
else:
|
94 |
+
message_history.append({"role": "user", "content": input_.content})
|
95 |
+
|
96 |
+
response = self.client.chat.completions.create(
|
97 |
+
model=self.model_name,
|
98 |
+
messages=message_history
|
99 |
+
)
|
100 |
+
|
101 |
+
reasoning_content = response.choices[0].message.reasoning_content
|
102 |
+
content = response.choices[0].message.content
|
103 |
+
return AIMessage(content=content, reasoning_content=reasoning_content)
|
104 |
+
|
105 |
+
|
106 |
+
class DeepSeekR1ChatOllama(ChatOllama):
|
107 |
+
|
108 |
+
async def ainvoke(
|
109 |
+
self,
|
110 |
+
input: LanguageModelInput,
|
111 |
+
config: Optional[RunnableConfig] = None,
|
112 |
+
*,
|
113 |
+
stop: Optional[list[str]] = None,
|
114 |
+
**kwargs: Any,
|
115 |
+
) -> AIMessage:
|
116 |
+
org_ai_message = await super().ainvoke(input=input)
|
117 |
+
org_content = org_ai_message.content
|
118 |
+
reasoning_content = org_content.split("</think>")[0].replace("<think>", "")
|
119 |
+
content = org_content.split("</think>")[1]
|
120 |
+
if "**JSON Response:**" in content:
|
121 |
+
content = content.split("**JSON Response:**")[-1]
|
122 |
+
return AIMessage(content=content, reasoning_content=reasoning_content)
|
123 |
+
|
124 |
+
def invoke(
|
125 |
+
self,
|
126 |
+
input: LanguageModelInput,
|
127 |
+
config: Optional[RunnableConfig] = None,
|
128 |
+
*,
|
129 |
+
stop: Optional[list[str]] = None,
|
130 |
+
**kwargs: Any,
|
131 |
+
) -> AIMessage:
|
132 |
+
org_ai_message = super().invoke(input=input)
|
133 |
+
org_content = org_ai_message.content
|
134 |
+
reasoning_content = org_content.split("</think>")[0].replace("<think>", "")
|
135 |
+
content = org_content.split("</think>")[1]
|
136 |
+
if "**JSON Response:**" in content:
|
137 |
+
content = content.split("**JSON Response:**")[-1]
|
138 |
+
return AIMessage(content=content, reasoning_content=reasoning_content)
|
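An illustrative sketch of the DeepSeek-R1 wrapper: it returns `AIMessage` objects carrying an extra `reasoning_content` attribute next to the final answer. Endpoint, model, and key values are placeholders.

```python
import os

from langchain_core.messages import HumanMessage, SystemMessage
from src.utils.llm import DeepSeekR1ChatOpenAI

llm = DeepSeekR1ChatOpenAI(
    model="deepseek-reasoner",  # placeholder model name
    base_url=os.getenv("DEEPSEEK_ENDPOINT", "https://api.deepseek.com"),
    api_key=os.getenv("DEEPSEEK_API_KEY", ""),
)

reply = llm.invoke([
    SystemMessage(content="You are a concise assistant."),
    HumanMessage(content="Summarize what a browser agent does in one sentence."),
])
print(reply.reasoning_content)  # reasoning trace exposed by the wrapper
print(reply.content)            # final answer
```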
src/utils/utils.py
ADDED
@@ -0,0 +1,400 @@
import base64
import os
import time
from pathlib import Path
from typing import Dict, Optional
import requests
import json
import gradio as gr
import uuid

from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama

PROVIDER_DISPLAY_NAMES = {
    "openai": "OpenAI",
    "azure_openai": "Azure OpenAI",
    "anthropic": "Anthropic",
    "deepseek": "DeepSeek",
    "google": "Google",
    "alibaba": "Alibaba",
    "moonshot": "MoonShot",
    "unbound": "Unbound AI"
}


def get_llm_model(provider: str, **kwargs):
    """
    Get the LLM model instance for the given provider.
    :param provider: provider name
    :param kwargs: model options (model_name, temperature, base_url, api_key, ...)
    :return: a LangChain chat model
    """
    if provider not in ["ollama"]:
        env_var = f"{provider.upper()}_API_KEY"
        api_key = kwargs.get("api_key", "") or os.getenv(env_var, "")
        if not api_key:
            raise MissingAPIKeyError(provider, env_var)
        kwargs["api_key"] = api_key

    if provider == "anthropic":
        if not kwargs.get("base_url", ""):
            base_url = "https://api.anthropic.com"
        else:
            base_url = kwargs.get("base_url")

        return ChatAnthropic(
            model=kwargs.get("model_name", "claude-3-5-sonnet-20241022"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == 'mistral':
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("MISTRAL_ENDPOINT", "https://api.mistral.ai/v1")
        else:
            base_url = kwargs.get("base_url")
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("MISTRAL_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")

        return ChatMistralAI(
            model=kwargs.get("model_name", "mistral-large-latest"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
        else:
            base_url = kwargs.get("base_url")

        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "deepseek":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("DEEPSEEK_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")

        if kwargs.get("model_name", "deepseek-chat") == "deepseek-reasoner":
            return DeepSeekR1ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-reasoner"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
        else:
            return ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-chat"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
    elif provider == "google":
        return ChatGoogleGenerativeAI(
            model=kwargs.get("model_name", "gemini-2.0-flash-exp"),
            temperature=kwargs.get("temperature", 0.0),
            api_key=api_key,
        )
    elif provider == "ollama":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
        else:
            base_url = kwargs.get("base_url")

        if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
            return DeepSeekR1ChatOllama(
                model=kwargs.get("model_name", "deepseek-r1:14b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                base_url=base_url,
            )
        else:
            return ChatOllama(
                model=kwargs.get("model_name", "qwen2.5:7b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                num_predict=kwargs.get("num_predict", 1024),
                base_url=base_url,
            )
    elif provider == "azure_openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        api_version = kwargs.get("api_version", "") or os.getenv("AZURE_OPENAI_API_VERSION", "2025-01-01-preview")
        return AzureChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            api_version=api_version,
            azure_endpoint=base_url,
            api_key=api_key,
        )
    elif provider == "alibaba":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("ALIBABA_ENDPOINT", "https://dashscope.aliyuncs.com/compatible-mode/v1")
        else:
            base_url = kwargs.get("base_url")

        return ChatOpenAI(
            model=kwargs.get("model_name", "qwen-plus"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "moonshot":
        return ChatOpenAI(
            model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("MOONSHOT_ENDPOINT"),
            api_key=os.getenv("MOONSHOT_API_KEY"),
        )
    elif provider == "unbound":
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o-mini"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai"),
            api_key=api_key,
        )
    elif provider == "siliconflow":
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("SiliconFLOW_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("SiliconFLOW_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            api_key=api_key,
            base_url=base_url,
            model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
            temperature=kwargs.get("temperature", 0.0),
        )
    else:
        raise ValueError(f"Unsupported provider: {provider}")


# Predefined model names for common providers
model_names = {
    "anthropic": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
    "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
    "deepseek": ["deepseek-chat", "deepseek-reasoner"],
    "google": ["gemini-2.0-flash", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest",
               "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-pro-exp-02-05"],
    "ollama": ["qwen2.5:7b", "qwen2.5:14b", "qwen2.5:32b", "qwen2.5-coder:14b", "qwen2.5-coder:32b", "llama2:7b",
               "deepseek-r1:14b", "deepseek-r1:32b"],
    "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
    "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
    "alibaba": ["qwen-plus", "qwen-max", "qwen-turbo", "qwen-long"],
    "moonshot": ["moonshot-v1-32k-vision-preview", "moonshot-v1-8k-vision-preview"],
    "unbound": ["gemini-2.0-flash", "gpt-4o-mini", "gpt-4o", "gpt-4.5-preview"],
    "siliconflow": [
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-V2.5",
        "deepseek-ai/deepseek-vl2",
        "Qwen/Qwen2.5-72B-Instruct-128K",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "Qwen/Qwen2-1.5B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2-VL-72B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "TeleAI/TeleChat2",
        "THUDM/glm-4-9b-chat",
        "Vendor-A/Qwen/Qwen2.5-72B-Instruct",
        "internlm/internlm2_5-7b-chat",
        "internlm/internlm2_5-20b-chat",
        "Pro/Qwen/Qwen2.5-7B-Instruct",
        "Pro/Qwen/Qwen2-7B-Instruct",
        "Pro/Qwen/Qwen2-1.5B-Instruct",
        "Pro/THUDM/chatglm3-6b",
        "Pro/THUDM/glm-4-9b-chat",
    ],
}


# Callback to update the model name dropdown based on the selected provider
def update_model_dropdown(llm_provider, api_key=None, base_url=None):
    """
    Update the model name dropdown with predefined models for the selected provider.
    """
    import gradio as gr
    # Use API keys from .env if not provided
    if not api_key:
        api_key = os.getenv(f"{llm_provider.upper()}_API_KEY", "")
    if not base_url:
        base_url = os.getenv(f"{llm_provider.upper()}_BASE_URL", "")

    # Use predefined models for the selected provider
    if llm_provider in model_names:
        return gr.Dropdown(choices=model_names[llm_provider], value=model_names[llm_provider][0], interactive=True)
    else:
        return gr.Dropdown(choices=[], value="", interactive=True, allow_custom_value=True)


class MissingAPIKeyError(Exception):
    """Custom exception for a missing API key."""

    def __init__(self, provider: str, env_var: str):
        provider_display = PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
        super().__init__(f"💥 {provider_display} API key not found! 🔑 Please set the "
                         f"`{env_var}` environment variable or provide it in the UI.")


def encode_image(img_path):
    if not img_path:
        return None
    with open(img_path, "rb") as fin:
        image_data = base64.b64encode(fin.read()).decode("utf-8")
        return image_data


def get_latest_files(directory: str, file_types: list = ['.webm', '.zip']) -> Dict[str, Optional[str]]:
    """Get the latest recording and trace files"""
    latest_files: Dict[str, Optional[str]] = {ext: None for ext in file_types}

    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
        return latest_files

    for file_type in file_types:
        try:
            matches = list(Path(directory).rglob(f"*{file_type}"))
            if matches:
                latest = max(matches, key=lambda p: p.stat().st_mtime)
                # Only return files that are complete (not being written)
                if time.time() - latest.stat().st_mtime > 1.0:
                    latest_files[file_type] = str(latest)
        except Exception as e:
            print(f"Error getting latest {file_type} file: {e}")

    return latest_files


async def capture_screenshot(browser_context):
    """Capture and encode a screenshot"""
    # Extract the Playwright browser instance
    playwright_browser = browser_context.browser.playwright_browser  # Ensure this is correct.

    # Check if the browser instance is valid and if an existing context can be reused
    if playwright_browser and playwright_browser.contexts:
        playwright_context = playwright_browser.contexts[0]
    else:
        return None

    # Access pages in the context
    pages = None
    if playwright_context:
        pages = playwright_context.pages

    # Use an existing page or create a new one if none exist
    if pages:
        active_page = pages[0]
        for page in pages:
            if page.url != "about:blank":
                active_page = page
    else:
        return None

    # Take screenshot
    try:
        screenshot = await active_page.screenshot(
            type='jpeg',
            quality=75,
            scale="css"
        )
        encoded = base64.b64encode(screenshot).decode('utf-8')
        return encoded
    except Exception as e:
        return None


class ConfigManager:
    def __init__(self):
        self.components = {}
        self.component_order = []

    def register_component(self, name: str, component):
        """Register a gradio component for config management."""
        self.components[name] = component
        if name not in self.component_order:
            self.component_order.append(name)
        return component

    def save_current_config(self):
        """Save the current configuration of all registered components."""
        current_config = {}
        for name in self.component_order:
            component = self.components[name]
            # Get the current value from the component
            current_config[name] = getattr(component, "value", None)

        return save_config_to_file(current_config)

    def update_ui_from_config(self, config_file):
        """Update UI components from a loaded configuration file."""
        if config_file is None:
            return [gr.update() for _ in self.component_order] + ["No file selected."]

        loaded_config = load_config_from_file(config_file.name)

        if not isinstance(loaded_config, dict):
            return [gr.update() for _ in self.component_order] + ["Error: Invalid configuration file."]

        # Prepare updates for all components
        updates = []
        for name in self.component_order:
            if name in loaded_config:
                updates.append(gr.update(value=loaded_config[name]))
            else:
                updates.append(gr.update())

        updates.append("Configuration loaded successfully.")
        return updates

    def get_all_components(self):
        """Return all registered components in the order they were registered."""
        return [self.components[name] for name in self.component_order]


def load_config_from_file(config_file):
    """Load settings from a config file (JSON format)."""
    try:
        with open(config_file, 'r') as f:
            settings = json.load(f)
        return settings
    except Exception as e:
        return f"Error loading configuration: {str(e)}"


def save_config_to_file(settings, save_dir="./tmp/webui_settings"):
    """Save the current settings to a JSON file named with a UUID."""
    os.makedirs(save_dir, exist_ok=True)
    config_file = os.path.join(save_dir, f"{uuid.uuid4()}.json")
    with open(config_file, 'w') as f:
        json.dump(settings, f, indent=2)
    return f"Configuration saved to {config_file}"
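For orientation, a hedged usage sketch of the factory above (illustrative only, not part of the uploaded files): get_llm_model is the single entry point the web UI and the tests use to build a LangChain chat model from a provider name plus keyword overrides. It assumes OPENAI_API_KEY is exported in the environment; otherwise MissingAPIKeyError is raised.

# Illustrative sketch only - not part of this upload.
# Assumes OPENAI_API_KEY is set in the environment (see .env.example).
from src.utils.utils import get_llm_model

llm = get_llm_model(
    provider="openai",
    model_name="gpt-4o",
    temperature=0.0,
)
print(llm.invoke("Say hello in one word.").content)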
supervisord.conf
ADDED
@@ -0,0 +1,96 @@
[supervisord]
user=root
nodaemon=true
logfile=/dev/stdout
logfile_maxbytes=0
loglevel=debug

[program:xvfb]
command=Xvfb :99 -screen 0 %(ENV_RESOLUTION)s -ac +extension GLX +render -noreset
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=100
startsecs=3
stopsignal=TERM
stopwaitsecs=10

[program:vnc_setup]
command=bash -c "mkdir -p ~/.vnc && echo '%(ENV_VNC_PASSWORD)s' | vncpasswd -f > ~/.vnc/passwd && chmod 600 ~/.vnc/passwd && ls -la ~/.vnc/passwd"
autorestart=false
startsecs=0
priority=150
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:x11vnc]
command=bash -c "mkdir -p /var/log && touch /var/log/x11vnc.log && chmod 666 /var/log/x11vnc.log && sleep 5 && DISPLAY=:99 x11vnc -display :99 -forever -shared -rfbauth /root/.vnc/passwd -rfbport 5901 -o /var/log/x11vnc.log"
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=200
startretries=10
startsecs=10
stopsignal=TERM
stopwaitsecs=10
depends_on=vnc_setup,xvfb

[program:x11vnc_log]
command=bash -c "mkdir -p /var/log && touch /var/log/x11vnc.log && tail -f /var/log/x11vnc.log"
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=250
stopsignal=TERM
stopwaitsecs=5
depends_on=x11vnc

[program:novnc]
command=bash -c "sleep 5 && cd /opt/novnc && ./utils/novnc_proxy --vnc localhost:5901 --listen 0.0.0.0:6080 --web /opt/novnc"
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=300
startretries=5
startsecs=3
depends_on=x11vnc

[program:persistent_browser]
environment=START_URL="data:text/html,<html><body><h1>Browser Ready</h1></body></html>"
command=bash -c "mkdir -p /app/data/chrome_data && sleep 8 && $(find /ms-playwright/chromium-*/chrome-linux -name chrome) --user-data-dir=/app/data/chrome_data --window-position=0,0 --window-size=%(ENV_RESOLUTION_WIDTH)s,%(ENV_RESOLUTION_HEIGHT)s --start-maximized --no-sandbox --disable-dev-shm-usage --disable-gpu --disable-software-rasterizer --disable-setuid-sandbox --no-first-run --no-default-browser-check --no-experiments --ignore-certificate-errors --remote-debugging-port=9222 --remote-debugging-address=0.0.0.0 \"$START_URL\""
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=350
startretries=5
startsecs=10
stopsignal=TERM
stopwaitsecs=15
depends_on=novnc

[program:webui]
command=python webui.py --ip 0.0.0.0 --port 7788
directory=/app
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=400
startretries=3
startsecs=3
stopsignal=TERM
stopwaitsecs=10
depends_on=persistent_browser
tests/test_browser_use.py
ADDED
@@ -0,0 +1,364 @@
import pdb

from dotenv import load_dotenv

load_dotenv()
import sys

sys.path.append(".")
import asyncio
import os
import sys
from pprint import pprint

from browser_use import Agent
from browser_use.agent.views import AgentHistoryList

from src.utils import utils


async def test_browser_use_org():
    from browser_use.browser.browser import Browser, BrowserConfig
    from browser_use.browser.context import (
        BrowserContextConfig,
        BrowserContextWindowSize,
    )

    # llm = utils.get_llm_model(
    #     provider="azure_openai",
    #     model_name="gpt-4o",
    #     temperature=0.8,
    #     base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
    #     api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
    # )

    # llm = utils.get_llm_model(
    #     provider="deepseek",
    #     model_name="deepseek-chat",
    #     temperature=0.8
    # )

    llm = utils.get_llm_model(
        provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
    )

    window_w, window_h = 1920, 1080
    use_vision = False
    use_own_browser = False
    if use_own_browser:
        chrome_path = os.getenv("CHROME_PATH", None)
        if chrome_path == "":
            chrome_path = None
    else:
        chrome_path = None

    tool_calling_method = "json_schema"  # set to json_schema when using ollama

    browser = Browser(
        config=BrowserConfig(
            headless=False,
            disable_security=True,
            chrome_instance_path=chrome_path,
            extra_chromium_args=[f"--window-size={window_w},{window_h}"],
        )
    )
    async with await browser.new_context(
        config=BrowserContextConfig(
            trace_path="./tmp/traces",
            save_recording_path="./tmp/record_videos",
            no_viewport=False,
            browser_window_size=BrowserContextWindowSize(
                width=window_w, height=window_h
            ),
        )
    ) as browser_context:
        agent = Agent(
            task="go to google.com and type 'OpenAI' click search and give me the first url",
            llm=llm,
            browser_context=browser_context,
            use_vision=use_vision,
            tool_calling_method=tool_calling_method
        )
        history: AgentHistoryList = await agent.run(max_steps=10)

        print("Final Result:")
        pprint(history.final_result(), indent=4)

        print("\nErrors:")
        pprint(history.errors(), indent=4)

        # e.g. xPaths the model clicked on
        print("\nModel Outputs:")
        pprint(history.model_actions(), indent=4)

        print("\nThoughts:")
        pprint(history.model_thoughts(), indent=4)
    # close browser
    await browser.close()


async def test_browser_use_custom():
    from browser_use.browser.context import BrowserContextWindowSize
    from browser_use.browser.browser import BrowserConfig
    from playwright.async_api import async_playwright

    from src.agent.custom_agent import CustomAgent
    from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
    from src.browser.custom_browser import CustomBrowser
    from src.browser.custom_context import BrowserContextConfig
    from src.controller.custom_controller import CustomController

    window_w, window_h = 1280, 1100

    # llm = utils.get_llm_model(
    #     provider="openai",
    #     model_name="gpt-4o",
    #     temperature=0.8,
    #     base_url=os.getenv("OPENAI_ENDPOINT", ""),
    #     api_key=os.getenv("OPENAI_API_KEY", ""),
    # )

    llm = utils.get_llm_model(
        provider="azure_openai",
        model_name="gpt-4o",
        temperature=0.5,
        base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
        api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
    )

    # llm = utils.get_llm_model(
    #     provider="google",
    #     model_name="gemini-2.0-flash",
    #     temperature=0.6,
    #     api_key=os.getenv("GOOGLE_API_KEY", "")
    # )

    # llm = utils.get_llm_model(
    #     provider="deepseek",
    #     model_name="deepseek-reasoner",
    #     temperature=0.8
    # )

    # llm = utils.get_llm_model(
    #     provider="deepseek",
    #     model_name="deepseek-chat",
    #     temperature=0.8
    # )

    # llm = utils.get_llm_model(
    #     provider="ollama", model_name="qwen2.5:7b", temperature=0.5
    # )

    # llm = utils.get_llm_model(
    #     provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
    # )

    controller = CustomController()
    use_own_browser = True
    disable_security = True
    use_vision = True  # Set to False when using DeepSeek

    max_actions_per_step = 10
    playwright = None
    browser = None
    browser_context = None

    try:
        extra_chromium_args = [f"--window-size={window_w},{window_h}"]
        if use_own_browser:
            chrome_path = os.getenv("CHROME_PATH", None)
            if chrome_path == "":
                chrome_path = None
            chrome_user_data = os.getenv("CHROME_USER_DATA", None)
            if chrome_user_data:
                extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
        else:
            chrome_path = None
        browser = CustomBrowser(
            config=BrowserConfig(
                headless=False,
                disable_security=disable_security,
                chrome_instance_path=chrome_path,
                extra_chromium_args=extra_chromium_args,
            )
        )
        browser_context = await browser.new_context(
            config=BrowserContextConfig(
                trace_path="./tmp/traces",
                save_recording_path="./tmp/record_videos",
                no_viewport=False,
                browser_window_size=BrowserContextWindowSize(
                    width=window_w, height=window_h
                ),
            )
        )
        agent = CustomAgent(
            task="open youtube in tab 1 , open google email in tab 2, open facebook in tab 3",
            add_infos="",  # some hints for llm to complete the task
            llm=llm,
            browser=browser,
            browser_context=browser_context,
            controller=controller,
            system_prompt_class=CustomSystemPrompt,
            agent_prompt_class=CustomAgentMessagePrompt,
            use_vision=use_vision,
            max_actions_per_step=max_actions_per_step,
            generate_gif=True
        )
        history: AgentHistoryList = await agent.run(max_steps=100)

        print("Final Result:")
        pprint(history.final_result(), indent=4)

        print("\nErrors:")
        pprint(history.errors(), indent=4)

        # e.g. xPaths the model clicked on
        print("\nModel Outputs:")
        pprint(history.model_actions(), indent=4)

        print("\nThoughts:")
        pprint(history.model_thoughts(), indent=4)

    except Exception:
        import traceback

        traceback.print_exc()
    finally:
        # Explicitly close the persistent browser context
        if browser_context:
            await browser_context.close()

        # Close the Playwright object
        if playwright:
            await playwright.stop()
        if browser:
            await browser.close()


async def test_browser_use_parallel():
    from browser_use.browser.context import BrowserContextWindowSize
    from browser_use.browser.browser import BrowserConfig
    from playwright.async_api import async_playwright
    from browser_use.browser.browser import Browser
    from src.agent.custom_agent import CustomAgent
    from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
    from src.browser.custom_browser import CustomBrowser
    from src.browser.custom_context import BrowserContextConfig
    from src.controller.custom_controller import CustomController

    window_w, window_h = 1920, 1080

    # llm = utils.get_llm_model(
    #     provider="openai",
    #     model_name="gpt-4o",
    #     temperature=0.8,
    #     base_url=os.getenv("OPENAI_ENDPOINT", ""),
    #     api_key=os.getenv("OPENAI_API_KEY", ""),
    # )

    # llm = utils.get_llm_model(
    #     provider="azure_openai",
    #     model_name="gpt-4o",
    #     temperature=0.8,
    #     base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
    #     api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
    # )

    llm = utils.get_llm_model(
        provider="google",  # get_llm_model expects the provider key "google" for Gemini models
        model_name="gemini-2.0-flash-exp",
        temperature=1.0,
        api_key=os.getenv("GOOGLE_API_KEY", "")
    )

    # llm = utils.get_llm_model(
    #     provider="deepseek",
    #     model_name="deepseek-reasoner",
    #     temperature=0.8
    # )

    # llm = utils.get_llm_model(
    #     provider="deepseek",
    #     model_name="deepseek-chat",
    #     temperature=0.8
    # )

    # llm = utils.get_llm_model(
    #     provider="ollama", model_name="qwen2.5:7b", temperature=0.5
    # )

    # llm = utils.get_llm_model(
    #     provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
    # )

    controller = CustomController()
    use_own_browser = True
    disable_security = True
    use_vision = True  # Set to False when using DeepSeek

    max_actions_per_step = 1
    playwright = None
    browser = None
    browser_context = None

    browser = Browser(
        config=BrowserConfig(
            disable_security=True,
            headless=False,
            new_context_config=BrowserContextConfig(save_recording_path='./tmp/recordings'),
        )
    )

    try:
        agents = [
            Agent(task=task, llm=llm, browser=browser)
            for task in [
                'Search Google for weather in Tokyo',
                'Check Reddit front page title',
                'Find NASA image of the day',
                'Check top story on CNN',
                # 'Search latest SpaceX launch date',
                # 'Look up population of Paris',
                # 'Find current time in Sydney',
                # 'Check who won last Super Bowl',
                # 'Search trending topics on Twitter',
            ]
        ]

        history = await asyncio.gather(*[agent.run() for agent in agents])
        pdb.set_trace()
        print("Final Result:")
        pprint(history.final_result(), indent=4)

        print("\nErrors:")
        pprint(history.errors(), indent=4)

        # e.g. xPaths the model clicked on
        print("\nModel Outputs:")
        pprint(history.model_actions(), indent=4)

        print("\nThoughts:")
        pprint(history.model_thoughts(), indent=4)
    # close browser
    except Exception:
        import traceback

        traceback.print_exc()
    finally:
        # Explicitly close the persistent browser context
        if browser_context:
            await browser_context.close()

        # Close the Playwright object
        if playwright:
            await playwright.stop()
        if browser:
            await browser.close()


if __name__ == "__main__":
    # asyncio.run(test_browser_use_org())
    # asyncio.run(test_browser_use_parallel())
    asyncio.run(test_browser_use_custom())
tests/test_deep_research.py
ADDED
@@ -0,0 +1,30 @@
import asyncio
import os
from dotenv import load_dotenv

load_dotenv()
import sys

sys.path.append(".")

async def test_deep_research():
    from src.utils.deep_research import deep_research
    from src.utils import utils

    task = "write a report about DeepSeek-R1, get its pdf"
    llm = utils.get_llm_model(
        provider="google",  # get_llm_model expects the provider key "google" for Gemini models
        model_name="gemini-2.0-flash-thinking-exp-01-21",
        temperature=1.0,
        api_key=os.getenv("GOOGLE_API_KEY", "")
    )

    report_content, report_file_path = await deep_research(task=task, llm=llm, agent_state=None,
                                                           max_search_iterations=1,
                                                           max_query_num=3,
                                                           use_own_browser=False)


if __name__ == "__main__":
    asyncio.run(test_deep_research())
tests/test_llm_api.py
ADDED
@@ -0,0 +1,137 @@
import os
import pdb
from dataclasses import dataclass

from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_ollama import ChatOllama

load_dotenv()

import sys

sys.path.append(".")

@dataclass
class LLMConfig:
    provider: str
    model_name: str
    temperature: float = 0.8
    base_url: str = None
    api_key: str = None

def create_message_content(text, image_path=None):
    content = [{"type": "text", "text": text}]
    image_format = "png" if image_path and image_path.endswith(".png") else "jpeg"
    if image_path:
        from src.utils import utils
        image_data = utils.encode_image(image_path)
        content.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/{image_format};base64,{image_data}"}
        })
    return content

def get_env_value(key, provider):
    env_mappings = {
        "openai": {"api_key": "OPENAI_API_KEY", "base_url": "OPENAI_ENDPOINT"},
        "azure_openai": {"api_key": "AZURE_OPENAI_API_KEY", "base_url": "AZURE_OPENAI_ENDPOINT"},
        "google": {"api_key": "GOOGLE_API_KEY"},
        "deepseek": {"api_key": "DEEPSEEK_API_KEY", "base_url": "DEEPSEEK_ENDPOINT"},
        "mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"},
        "alibaba": {"api_key": "ALIBABA_API_KEY", "base_url": "ALIBABA_ENDPOINT"},
        "moonshot": {"api_key": "MOONSHOT_API_KEY", "base_url": "MOONSHOT_ENDPOINT"},
    }

    if provider in env_mappings and key in env_mappings[provider]:
        return os.getenv(env_mappings[provider][key], "")
    return ""

def test_llm(config, query, image_path=None, system_message=None):
    from src.utils import utils

    # Special handling for Ollama-based models
    if config.provider == "ollama":
        if "deepseek-r1" in config.model_name:
            from src.utils.llm import DeepSeekR1ChatOllama
            llm = DeepSeekR1ChatOllama(model=config.model_name)
        else:
            llm = ChatOllama(model=config.model_name)

        ai_msg = llm.invoke(query)
        print(ai_msg.content)
        if "deepseek-r1" in config.model_name:
            pdb.set_trace()
        return

    # For other providers, use the standard configuration
    llm = utils.get_llm_model(
        provider=config.provider,
        model_name=config.model_name,
        temperature=config.temperature,
        base_url=config.base_url or get_env_value("base_url", config.provider),
        api_key=config.api_key or get_env_value("api_key", config.provider)
    )

    # Prepare messages for non-Ollama models
    messages = []
    if system_message:
        messages.append(SystemMessage(content=create_message_content(system_message)))
    messages.append(HumanMessage(content=create_message_content(query, image_path)))
    ai_msg = llm.invoke(messages)

    # Handle different response types
    if hasattr(ai_msg, "reasoning_content"):
        print(ai_msg.reasoning_content)
    print(ai_msg.content)

    if config.provider == "deepseek" and "deepseek-reasoner" in config.model_name:
        print(llm.model_name)
        pdb.set_trace()

def test_openai_model():
    config = LLMConfig(provider="openai", model_name="gpt-4o")
    test_llm(config, "Describe this image", "assets/examples/test.png")

def test_google_model():
    # Enable your API key first if you haven't: https://ai.google.dev/palm_docs/oauth_quickstart
    config = LLMConfig(provider="google", model_name="gemini-2.0-flash-exp")
    test_llm(config, "Describe this image", "assets/examples/test.png")

def test_azure_openai_model():
    config = LLMConfig(provider="azure_openai", model_name="gpt-4o")
    test_llm(config, "Describe this image", "assets/examples/test.png")

def test_deepseek_model():
    config = LLMConfig(provider="deepseek", model_name="deepseek-chat")
    test_llm(config, "Who are you?")

def test_deepseek_r1_model():
    config = LLMConfig(provider="deepseek", model_name="deepseek-reasoner")
    test_llm(config, "Which is greater, 9.11 or 9.8?", system_message="You are a helpful AI assistant.")

def test_ollama_model():
    config = LLMConfig(provider="ollama", model_name="qwen2.5:7b")
    test_llm(config, "Sing a ballad of LangChain.")

def test_deepseek_r1_ollama_model():
    config = LLMConfig(provider="ollama", model_name="deepseek-r1:14b")
    test_llm(config, "How many 'r's are in the word 'strawberry'?")

def test_mistral_model():
    config = LLMConfig(provider="mistral", model_name="pixtral-large-latest")
    test_llm(config, "Describe this image", "assets/examples/test.png")

def test_moonshot_model():
    config = LLMConfig(provider="moonshot", model_name="moonshot-v1-32k-vision-preview")
    test_llm(config, "Describe this image", "assets/examples/test.png")

if __name__ == "__main__":
    # test_openai_model()
    # test_google_model()
    # test_azure_openai_model()
    # test_deepseek_model()
    # test_ollama_model()
    test_deepseek_r1_model()
    # test_deepseek_r1_ollama_model()
    # test_mistral_model()
tests/test_playwright.py
ADDED
@@ -0,0 +1,31 @@
import pdb
from dotenv import load_dotenv

load_dotenv()


def test_connect_browser():
    import os
    from playwright.sync_api import sync_playwright

    chrome_exe = os.getenv("CHROME_PATH", "")
    chrome_use_data = os.getenv("CHROME_USER_DATA", "")

    with sync_playwright() as p:
        browser = p.chromium.launch_persistent_context(
            user_data_dir=chrome_use_data,
            executable_path=chrome_exe,
            headless=False  # Keep browser window visible
        )

        page = browser.new_page()
        page.goto("https://mail.google.com/mail/u/0/#inbox")
        page.wait_for_load_state()

        input("Press the Enter key to close the browser...")

        browser.close()


if __name__ == '__main__':
    test_connect_browser()
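A related sketch (illustrative only, not part of the uploaded files): instead of launching a persistent profile, a Chrome instance that is already running with --remote-debugging-port=9222 (as started by the persistent_browser program in supervisord.conf) can be attached to over CDP, which is what the CHROME_CDP setting consumed by webui.py is for.

# Illustrative sketch only - not part of this upload.
# Assumes a Chrome/Chromium instance is already listening on http://localhost:9222.
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.connect_over_cdp("http://localhost:9222")
    context = browser.contexts[0] if browser.contexts else browser.new_context()
    page = context.new_page()
    page.goto("https://example.com")
    print(page.title())
    browser.close()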
webui.py
ADDED
@@ -0,0 +1,1203 @@
1 |
+
import pdb
|
2 |
+
import logging
|
3 |
+
|
4 |
+
from dotenv import load_dotenv
|
5 |
+
|
6 |
+
load_dotenv()
|
7 |
+
import os
|
8 |
+
import glob
|
9 |
+
import asyncio
|
10 |
+
import argparse
|
11 |
+
import os
|
12 |
+
|
13 |
+
logger = logging.getLogger(__name__)
|
14 |
+
|
15 |
+
import gradio as gr
|
16 |
+
import inspect
|
17 |
+
from functools import wraps
|
18 |
+
|
19 |
+
from browser_use.agent.service import Agent
|
20 |
+
from playwright.async_api import async_playwright
|
21 |
+
from browser_use.browser.browser import Browser, BrowserConfig
|
22 |
+
from browser_use.browser.context import (
|
23 |
+
BrowserContextConfig,
|
24 |
+
BrowserContextWindowSize,
|
25 |
+
)
|
26 |
+
from langchain_ollama import ChatOllama
|
27 |
+
from playwright.async_api import async_playwright
|
28 |
+
from src.utils.agent_state import AgentState
|
29 |
+
|
30 |
+
from src.utils import utils
|
31 |
+
from src.agent.custom_agent import CustomAgent
|
32 |
+
from src.browser.custom_browser import CustomBrowser
|
33 |
+
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
|
34 |
+
from src.browser.custom_context import BrowserContextConfig, CustomBrowserContext
|
35 |
+
from src.controller.custom_controller import CustomController
|
36 |
+
from gradio.themes import Citrus, Default, Glass, Monochrome, Ocean, Origin, Soft, Base
|
37 |
+
from src.utils.utils import update_model_dropdown, get_latest_files, capture_screenshot, MissingAPIKeyError
|
38 |
+
from src.utils import utils
|
39 |
+
|
40 |
+
# Global variables for persistence
|
41 |
+
_global_browser = None
|
42 |
+
_global_browser_context = None
|
43 |
+
_global_agent = None
|
44 |
+
|
45 |
+
# Create the global agent state instance
|
46 |
+
_global_agent_state = AgentState()
|
47 |
+
|
48 |
+
# webui config
|
49 |
+
webui_config_manager = utils.ConfigManager()
|
50 |
+
|
51 |
+
|
52 |
+
def scan_and_register_components(blocks):
|
53 |
+
"""扫描一个 Blocks 对象并注册其中的所有交互式组件,但不包括按钮"""
|
54 |
+
global webui_config_manager
|
55 |
+
|
56 |
+
def traverse_blocks(block, prefix=""):
|
57 |
+
registered = 0
|
58 |
+
|
59 |
+
# 处理 Blocks 自身的组件
|
60 |
+
if hasattr(block, "children"):
|
61 |
+
for i, child in enumerate(block.children):
|
62 |
+
if isinstance(child, gr.components.Component):
|
63 |
+
# 排除按钮 (Button) 组件
|
64 |
+
if getattr(child, "interactive", False) and not isinstance(child, gr.Button):
|
65 |
+
name = f"{prefix}component_{i}"
|
66 |
+
if hasattr(child, "label") and child.label:
|
67 |
+
# 使用标签作为名称的一部分
|
68 |
+
label = child.label
|
69 |
+
name = f"{prefix}{label}"
|
70 |
+
logger.debug(f"Registering component: {name}")
|
71 |
+
webui_config_manager.register_component(name, child)
|
72 |
+
registered += 1
|
73 |
+
elif hasattr(child, "children"):
|
74 |
+
# 递归处理嵌套的 Blocks
|
75 |
+
new_prefix = f"{prefix}block_{i}_"
|
76 |
+
registered += traverse_blocks(child, new_prefix)
|
77 |
+
|
78 |
+
return registered
|
79 |
+
|
80 |
+
total = traverse_blocks(blocks)
|
81 |
+
logger.info(f"Total registered components: {total}")
|
82 |
+
|
83 |
+
|
84 |
+
def save_current_config():
|
85 |
+
return webui_config_manager.save_current_config()
|
86 |
+
|
87 |
+
|
88 |
+
def update_ui_from_config(config_file):
|
89 |
+
return webui_config_manager.update_ui_from_config(config_file)
|
90 |
+
|
91 |
+
|
92 |
+
def resolve_sensitive_env_variables(text):
|
93 |
+
"""
|
94 |
+
Replace environment variable placeholders ($SENSITIVE_*) with their values.
|
95 |
+
Only replaces variables that start with SENSITIVE_.
|
96 |
+
"""
|
97 |
+
if not text:
|
98 |
+
return text
|
99 |
+
|
100 |
+
import re
|
101 |
+
|
102 |
+
# Find all $SENSITIVE_* patterns
|
103 |
+
env_vars = re.findall(r'\$SENSITIVE_[A-Za-z0-9_]*', text)
|
104 |
+
|
105 |
+
result = text
|
106 |
+
for var in env_vars:
|
107 |
+
# Remove the $ prefix to get the actual environment variable name
|
108 |
+
env_name = var[1:] # removes the $
|
109 |
+
env_value = os.getenv(env_name)
|
110 |
+
if env_value is not None:
|
111 |
+
# Replace $SENSITIVE_VAR_NAME with its value
|
112 |
+
result = result.replace(var, env_value)
|
113 |
+
|
114 |
+
return result
|
115 |
+
|
116 |
+
|
117 |
+
async def stop_agent():
|
118 |
+
"""Request the agent to stop and update UI with enhanced feedback"""
|
119 |
+
global _global_agent
|
120 |
+
|
121 |
+
try:
|
122 |
+
if _global_agent is not None:
|
123 |
+
# Request stop
|
124 |
+
_global_agent.stop()
|
125 |
+
# Update UI immediately
|
126 |
+
message = "Stop requested - the agent will halt at the next safe point"
|
127 |
+
logger.info(f"🛑 {message}")
|
128 |
+
|
129 |
+
# Return UI updates
|
130 |
+
return (
|
131 |
+
gr.update(value="Stopping...", interactive=False), # stop_button
|
132 |
+
gr.update(interactive=False), # run_button
|
133 |
+
)
|
134 |
+
except Exception as e:
|
135 |
+
error_msg = f"Error during stop: {str(e)}"
|
136 |
+
logger.error(error_msg)
|
137 |
+
return (
|
138 |
+
gr.update(value="Stop", interactive=True),
|
139 |
+
gr.update(interactive=True)
|
140 |
+
)
|
141 |
+
|
142 |
+
|
143 |
+
async def stop_research_agent():
|
144 |
+
"""Request the agent to stop and update UI with enhanced feedback"""
|
145 |
+
global _global_agent_state
|
146 |
+
|
147 |
+
try:
|
148 |
+
# Request stop
|
149 |
+
_global_agent_state.request_stop()
|
150 |
+
|
151 |
+
# Update UI immediately
|
152 |
+
message = "Stop requested - the agent will halt at the next safe point"
|
153 |
+
logger.info(f"🛑 {message}")
|
154 |
+
|
155 |
+
# Return UI updates
|
156 |
+
return ( # errors_output
|
157 |
+
gr.update(value="Stopping...", interactive=False), # stop_button
|
158 |
+
gr.update(interactive=False), # run_button
|
159 |
+
)
|
160 |
+
except Exception as e:
|
161 |
+
error_msg = f"Error during stop: {str(e)}"
|
162 |
+
logger.error(error_msg)
|
163 |
+
return (
|
164 |
+
gr.update(value="Stop", interactive=True),
|
165 |
+
gr.update(interactive=True)
|
166 |
+
)
|
167 |
+
|
168 |
+
|
169 |
+
async def run_browser_agent(
|
170 |
+
agent_type,
|
171 |
+
llm_provider,
|
172 |
+
llm_model_name,
|
173 |
+
llm_num_ctx,
|
174 |
+
llm_temperature,
|
175 |
+
llm_base_url,
|
176 |
+
llm_api_key,
|
177 |
+
use_own_browser,
|
178 |
+
keep_browser_open,
|
179 |
+
headless,
|
180 |
+
disable_security,
|
181 |
+
window_w,
|
182 |
+
window_h,
|
183 |
+
save_recording_path,
|
184 |
+
save_agent_history_path,
|
185 |
+
save_trace_path,
|
186 |
+
enable_recording,
|
187 |
+
task,
|
188 |
+
add_infos,
|
189 |
+
max_steps,
|
190 |
+
use_vision,
|
191 |
+
max_actions_per_step,
|
192 |
+
tool_calling_method,
|
193 |
+
chrome_cdp,
|
194 |
+
max_input_tokens
|
195 |
+
):
|
196 |
+
try:
|
197 |
+
# Disable recording if the checkbox is unchecked
|
198 |
+
if not enable_recording:
|
199 |
+
save_recording_path = None
|
200 |
+
|
201 |
+
# Ensure the recording directory exists if recording is enabled
|
202 |
+
if save_recording_path:
|
203 |
+
os.makedirs(save_recording_path, exist_ok=True)
|
204 |
+
|
205 |
+
# Get the list of existing videos before the agent runs
|
206 |
+
existing_videos = set()
|
207 |
+
if save_recording_path:
|
208 |
+
existing_videos = set(
|
209 |
+
glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
|
210 |
+
+ glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
|
211 |
+
)
|
212 |
+
|
213 |
+
task = resolve_sensitive_env_variables(task)
|
214 |
+
|
215 |
+
# Run the agent
|
216 |
+
llm = utils.get_llm_model(
|
217 |
+
provider=llm_provider,
|
218 |
+
model_name=llm_model_name,
|
219 |
+
num_ctx=llm_num_ctx,
|
220 |
+
temperature=llm_temperature,
|
221 |
+
base_url=llm_base_url,
|
222 |
+
api_key=llm_api_key,
|
223 |
+
)
|
224 |
+
if agent_type == "org":
|
225 |
+
final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_org_agent(
|
226 |
+
llm=llm,
|
227 |
+
use_own_browser=use_own_browser,
|
228 |
+
keep_browser_open=keep_browser_open,
|
229 |
+
headless=headless,
|
230 |
+
disable_security=disable_security,
|
231 |
+
window_w=window_w,
|
232 |
+
window_h=window_h,
|
233 |
+
save_recording_path=save_recording_path,
|
234 |
+
save_agent_history_path=save_agent_history_path,
|
235 |
+
save_trace_path=save_trace_path,
|
236 |
+
task=task,
|
237 |
+
max_steps=max_steps,
|
238 |
+
use_vision=use_vision,
|
239 |
+
max_actions_per_step=max_actions_per_step,
|
240 |
+
tool_calling_method=tool_calling_method,
|
241 |
+
chrome_cdp=chrome_cdp,
|
242 |
+
max_input_tokens=max_input_tokens
|
243 |
+
)
|
244 |
+
elif agent_type == "custom":
|
245 |
+
final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_custom_agent(
|
246 |
+
llm=llm,
|
247 |
+
use_own_browser=use_own_browser,
|
248 |
+
keep_browser_open=keep_browser_open,
|
249 |
+
headless=headless,
|
250 |
+
disable_security=disable_security,
|
251 |
+
window_w=window_w,
|
252 |
+
window_h=window_h,
|
253 |
+
save_recording_path=save_recording_path,
|
254 |
+
save_agent_history_path=save_agent_history_path,
|
255 |
+
save_trace_path=save_trace_path,
|
256 |
+
task=task,
|
257 |
+
add_infos=add_infos,
|
258 |
+
max_steps=max_steps,
|
259 |
+
use_vision=use_vision,
|
260 |
+
max_actions_per_step=max_actions_per_step,
|
261 |
+
tool_calling_method=tool_calling_method,
|
262 |
+
chrome_cdp=chrome_cdp,
|
263 |
+
max_input_tokens=max_input_tokens
|
264 |
+
)
|
265 |
+
else:
|
266 |
+
raise ValueError(f"Invalid agent type: {agent_type}")
|
267 |
+
|
268 |
+
# Get the list of videos after the agent runs (if recording is enabled)
|
269 |
+
# latest_video = None
|
270 |
+
# if save_recording_path:
|
271 |
+
# new_videos = set(
|
272 |
+
# glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
|
273 |
+
# + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
|
274 |
+
# )
|
275 |
+
# if new_videos - existing_videos:
|
276 |
+
# latest_video = list(new_videos - existing_videos)[0] # Get the first new video
|
277 |
+
|
278 |
+
gif_path = os.path.join(os.path.dirname(__file__), "agent_history.gif")
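# The agents below are created with generate_gif=True; the resulting agent_history.gif is expected next to this script and is returned to the UI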
|
279 |
+
|
280 |
+
return (
|
281 |
+
final_result,
|
282 |
+
errors,
|
283 |
+
model_actions,
|
284 |
+
model_thoughts,
|
285 |
+
gif_path,
|
286 |
+
trace_file,
|
287 |
+
history_file,
|
288 |
+
gr.update(value="Stop", interactive=True), # Re-enable stop button
|
289 |
+
gr.update(interactive=True) # Re-enable run button
|
290 |
+
)
|
291 |
+
|
292 |
+
except MissingAPIKeyError as e:
|
293 |
+
logger.error(str(e))
|
294 |
+
raise gr.Error(str(e), print_exception=False)
|
295 |
+
|
296 |
+
except Exception as e:
|
297 |
+
import traceback
|
298 |
+
traceback.print_exc()
|
299 |
+
errors = str(e) + "\n" + traceback.format_exc()
|
300 |
+
return (
|
301 |
+
'', # final_result
|
302 |
+
errors, # errors
|
303 |
+
'', # model_actions
|
304 |
+
'', # model_thoughts
|
305 |
+
None, # latest_video
|
306 |
+
None, # history_file
|
307 |
+
None, # trace_file
|
308 |
+
gr.update(value="Stop", interactive=True), # Re-enable stop button
|
309 |
+
gr.update(interactive=True) # Re-enable run button
|
310 |
+
)
|
311 |
+
|
312 |
+
|
313 |
+
async def run_org_agent(
|
314 |
+
llm,
|
315 |
+
use_own_browser,
|
316 |
+
keep_browser_open,
|
317 |
+
headless,
|
318 |
+
disable_security,
|
319 |
+
window_w,
|
320 |
+
window_h,
|
321 |
+
save_recording_path,
|
322 |
+
save_agent_history_path,
|
323 |
+
save_trace_path,
|
324 |
+
task,
|
325 |
+
max_steps,
|
326 |
+
use_vision,
|
327 |
+
max_actions_per_step,
|
328 |
+
tool_calling_method,
|
329 |
+
chrome_cdp,
|
330 |
+
max_input_tokens
|
331 |
+
):
|
332 |
+
try:
|
333 |
+
global _global_browser, _global_browser_context, _global_agent
|
334 |
+
|
335 |
+
extra_chromium_args = ["--accept_downloads=True", f"--window-size={window_w},{window_h}"]
|
336 |
+
cdp_url = chrome_cdp
|
337 |
+
|
338 |
+
if use_own_browser:
|
339 |
+
cdp_url = os.getenv("CHROME_CDP", chrome_cdp)
|
340 |
+
chrome_path = os.getenv("CHROME_PATH", None)
|
341 |
+
if chrome_path == "":
|
342 |
+
chrome_path = None
|
343 |
+
chrome_user_data = os.getenv("CHROME_USER_DATA", None)
|
344 |
+
if chrome_user_data:
|
345 |
+
extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
|
346 |
+
else:
|
347 |
+
chrome_path = None
|
348 |
+
|
349 |
+
if _global_browser is None:
|
350 |
+
_global_browser = Browser(
|
351 |
+
config=BrowserConfig(
|
352 |
+
headless=headless,
|
353 |
+
cdp_url=cdp_url,
|
354 |
+
disable_security=disable_security,
|
355 |
+
chrome_instance_path=chrome_path,
|
356 |
+
extra_chromium_args=extra_chromium_args,
|
357 |
+
)
|
358 |
+
)
|
359 |
+
|
360 |
+
if _global_browser_context is None:
|
361 |
+
_global_browser_context = await _global_browser.new_context(
|
362 |
+
config=BrowserContextConfig(
|
363 |
+
trace_path=save_trace_path if save_trace_path else None,
|
364 |
+
save_recording_path=save_recording_path if save_recording_path else None,
|
365 |
+
save_downloads_path="./tmp/downloads",
|
366 |
+
no_viewport=False,
|
367 |
+
browser_window_size=BrowserContextWindowSize(
|
368 |
+
width=window_w, height=window_h
|
369 |
+
),
|
370 |
+
)
|
371 |
+
)
|
372 |
+
|
373 |
+
if _global_agent is None:
|
374 |
+
_global_agent = Agent(
|
375 |
+
task=task,
|
376 |
+
llm=llm,
|
377 |
+
use_vision=use_vision,
|
378 |
+
browser=_global_browser,
|
379 |
+
browser_context=_global_browser_context,
|
380 |
+
max_actions_per_step=max_actions_per_step,
|
381 |
+
tool_calling_method=tool_calling_method,
|
382 |
+
max_input_tokens=max_input_tokens,
|
383 |
+
generate_gif=True
|
384 |
+
)
|
385 |
+
history = await _global_agent.run(max_steps=max_steps)
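# run() returns the agent's full step history; the fields below are pulled out of it for display in the UI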
|
386 |
+
|
387 |
+
history_file = os.path.join(save_agent_history_path, f"{_global_agent.state.agent_id}.json")
|
388 |
+
_global_agent.save_history(history_file)
|
389 |
+
|
390 |
+
final_result = history.final_result()
|
391 |
+
errors = history.errors()
|
392 |
+
model_actions = history.model_actions()
|
393 |
+
model_thoughts = history.model_thoughts()
|
394 |
+
|
395 |
+
trace_file = get_latest_files(save_trace_path)
|
396 |
+
|
397 |
+
return final_result, errors, model_actions, model_thoughts, trace_file.get('.zip'), history_file
|
398 |
+
except Exception as e:
|
399 |
+
import traceback
|
400 |
+
traceback.print_exc()
|
401 |
+
errors = str(e) + "\n" + traceback.format_exc()
|
402 |
+
return '', errors, '', '', None, None
|
403 |
+
finally:
|
404 |
+
_global_agent = None
|
405 |
+
# Handle cleanup based on persistence configuration
|
406 |
+
if not keep_browser_open:
|
407 |
+
if _global_browser_context:
|
408 |
+
await _global_browser_context.close()
|
409 |
+
_global_browser_context = None
|
410 |
+
|
411 |
+
if _global_browser:
|
412 |
+
await _global_browser.close()
|
413 |
+
_global_browser = None
|
414 |
+
|
415 |
+
|
416 |
+
async def run_custom_agent(
|
417 |
+
llm,
|
418 |
+
use_own_browser,
|
419 |
+
keep_browser_open,
|
420 |
+
headless,
|
421 |
+
disable_security,
|
422 |
+
window_w,
|
423 |
+
window_h,
|
424 |
+
save_recording_path,
|
425 |
+
save_agent_history_path,
|
426 |
+
save_trace_path,
|
427 |
+
task,
|
428 |
+
add_infos,
|
429 |
+
max_steps,
|
430 |
+
use_vision,
|
431 |
+
max_actions_per_step,
|
432 |
+
tool_calling_method,
|
433 |
+
chrome_cdp,
|
434 |
+
max_input_tokens
|
435 |
+
):
|
436 |
+
try:
|
437 |
+
global _global_browser, _global_browser_context, _global_agent
|
438 |
+
|
439 |
+
extra_chromium_args = ["--accept_downloads=True", f"--window-size={window_w},{window_h}"]
|
440 |
+
cdp_url = chrome_cdp
|
441 |
+
if use_own_browser:
|
442 |
+
cdp_url = os.getenv("CHROME_CDP", chrome_cdp)
|
443 |
+
|
444 |
+
chrome_path = os.getenv("CHROME_PATH", None)
|
445 |
+
if chrome_path == "":
|
446 |
+
chrome_path = None
|
447 |
+
chrome_user_data = os.getenv("CHROME_USER_DATA", None)
|
448 |
+
if chrome_user_data:
|
449 |
+
extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
|
450 |
+
else:
|
451 |
+
chrome_path = None
|
452 |
+
|
453 |
+
controller = CustomController()
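# CustomController presumably registers any extra actions the custom agent can call beyond the built-in browser actions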
|
454 |
+
|
455 |
+
# Initialize global browser if needed
|
456 |
+
# Reinitialize the browser whenever a CDP URL is provided (i.e. chrome_cdp is neither an empty string nor None)
|
457 |
+
if (_global_browser is None) or cdp_url:
|
458 |
+
_global_browser = CustomBrowser(
|
459 |
+
config=BrowserConfig(
|
460 |
+
headless=headless,
|
461 |
+
disable_security=disable_security,
|
462 |
+
cdp_url=cdp_url,
|
463 |
+
chrome_instance_path=chrome_path,
|
464 |
+
extra_chromium_args=extra_chromium_args,
|
465 |
+
)
|
466 |
+
)
|
467 |
+
|
468 |
+
if _global_browser_context is None or (chrome_cdp and cdp_url):
|
469 |
+
_global_browser_context = await _global_browser.new_context(
|
470 |
+
config=BrowserContextConfig(
|
471 |
+
trace_path=save_trace_path if save_trace_path else None,
|
472 |
+
save_recording_path=save_recording_path if save_recording_path else None,
|
473 |
+
no_viewport=False,
|
474 |
+
save_downloads_path="./tmp/downloads",
|
475 |
+
browser_window_size=BrowserContextWindowSize(
|
476 |
+
width=window_w, height=window_h
|
477 |
+
),
|
478 |
+
)
|
479 |
+
)
|
480 |
+
|
481 |
+
# Create and run agent
|
482 |
+
if _global_agent is None:
|
483 |
+
_global_agent = CustomAgent(
|
484 |
+
task=task,
|
485 |
+
add_infos=add_infos,
|
486 |
+
use_vision=use_vision,
|
487 |
+
llm=llm,
|
488 |
+
browser=_global_browser,
|
489 |
+
browser_context=_global_browser_context,
|
490 |
+
controller=controller,
|
491 |
+
system_prompt_class=CustomSystemPrompt,
|
492 |
+
agent_prompt_class=CustomAgentMessagePrompt,
|
493 |
+
max_actions_per_step=max_actions_per_step,
|
494 |
+
tool_calling_method=tool_calling_method,
|
495 |
+
max_input_tokens=max_input_tokens,
|
496 |
+
generate_gif=True
|
497 |
+
)
|
498 |
+
history = await _global_agent.run(max_steps=max_steps)
|
499 |
+
|
500 |
+
history_file = os.path.join(save_agent_history_path, f"{_global_agent.state.agent_id}.json")
|
501 |
+
_global_agent.save_history(history_file)
|
502 |
+
|
503 |
+
final_result = history.final_result()
|
504 |
+
errors = history.errors()
|
505 |
+
model_actions = history.model_actions()
|
506 |
+
model_thoughts = history.model_thoughts()
|
507 |
+
|
508 |
+
trace_file = get_latest_files(save_trace_path)
|
509 |
+
|
510 |
+
return final_result, errors, model_actions, model_thoughts, trace_file.get('.zip'), history_file
|
511 |
+
except Exception as e:
|
512 |
+
import traceback
|
513 |
+
traceback.print_exc()
|
514 |
+
errors = str(e) + "\n" + traceback.format_exc()
|
515 |
+
return '', errors, '', '', None, None
|
516 |
+
finally:
|
517 |
+
_global_agent = None
|
518 |
+
# Handle cleanup based on persistence configuration
|
519 |
+
if not keep_browser_open:
|
520 |
+
if _global_browser_context:
|
521 |
+
await _global_browser_context.close()
|
522 |
+
_global_browser_context = None
|
523 |
+
|
524 |
+
if _global_browser:
|
525 |
+
await _global_browser.close()
|
526 |
+
_global_browser = None
|
527 |
+
|
528 |
+
|
529 |
+
async def run_with_stream(
|
530 |
+
agent_type,
|
531 |
+
llm_provider,
|
532 |
+
llm_model_name,
|
533 |
+
llm_num_ctx,
|
534 |
+
llm_temperature,
|
535 |
+
llm_base_url,
|
536 |
+
llm_api_key,
|
537 |
+
use_own_browser,
|
538 |
+
keep_browser_open,
|
539 |
+
headless,
|
540 |
+
disable_security,
|
541 |
+
window_w,
|
542 |
+
window_h,
|
543 |
+
save_recording_path,
|
544 |
+
save_agent_history_path,
|
545 |
+
save_trace_path,
|
546 |
+
enable_recording,
|
547 |
+
task,
|
548 |
+
add_infos,
|
549 |
+
max_steps,
|
550 |
+
use_vision,
|
551 |
+
max_actions_per_step,
|
552 |
+
tool_calling_method,
|
553 |
+
chrome_cdp,
|
554 |
+
max_input_tokens
|
555 |
+
):
|
556 |
+
global _global_agent
|
557 |
+
|
558 |
+
stream_vw = 80
|
559 |
+
stream_vh = int(80 * window_h // window_w)
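# Size the live view at 80% of the viewport width and scale its height to preserve the browser window's aspect ratio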
|
560 |
+
if not headless:
|
561 |
+
result = await run_browser_agent(
|
562 |
+
agent_type=agent_type,
|
563 |
+
llm_provider=llm_provider,
|
564 |
+
llm_model_name=llm_model_name,
|
565 |
+
llm_num_ctx=llm_num_ctx,
|
566 |
+
llm_temperature=llm_temperature,
|
567 |
+
llm_base_url=llm_base_url,
|
568 |
+
llm_api_key=llm_api_key,
|
569 |
+
use_own_browser=use_own_browser,
|
570 |
+
keep_browser_open=keep_browser_open,
|
571 |
+
headless=headless,
|
572 |
+
disable_security=disable_security,
|
573 |
+
window_w=window_w,
|
574 |
+
window_h=window_h,
|
575 |
+
save_recording_path=save_recording_path,
|
576 |
+
save_agent_history_path=save_agent_history_path,
|
577 |
+
save_trace_path=save_trace_path,
|
578 |
+
enable_recording=enable_recording,
|
579 |
+
task=task,
|
580 |
+
add_infos=add_infos,
|
581 |
+
max_steps=max_steps,
|
582 |
+
use_vision=use_vision,
|
583 |
+
max_actions_per_step=max_actions_per_step,
|
584 |
+
tool_calling_method=tool_calling_method,
|
585 |
+
chrome_cdp=chrome_cdp,
|
586 |
+
max_input_tokens=max_input_tokens
|
587 |
+
)
|
588 |
+
# Add HTML content at the start of the result array
|
589 |
+
yield [gr.update(visible=False)] + list(result)
|
590 |
+
else:
|
591 |
+
try:
|
592 |
+
# Run the browser agent in the background
|
593 |
+
agent_task = asyncio.create_task(
|
594 |
+
run_browser_agent(
|
595 |
+
agent_type=agent_type,
|
596 |
+
llm_provider=llm_provider,
|
597 |
+
llm_model_name=llm_model_name,
|
598 |
+
llm_num_ctx=llm_num_ctx,
|
599 |
+
llm_temperature=llm_temperature,
|
600 |
+
llm_base_url=llm_base_url,
|
601 |
+
llm_api_key=llm_api_key,
|
602 |
+
use_own_browser=use_own_browser,
|
603 |
+
keep_browser_open=keep_browser_open,
|
604 |
+
headless=headless,
|
605 |
+
disable_security=disable_security,
|
606 |
+
window_w=window_w,
|
607 |
+
window_h=window_h,
|
608 |
+
save_recording_path=save_recording_path,
|
609 |
+
save_agent_history_path=save_agent_history_path,
|
610 |
+
save_trace_path=save_trace_path,
|
611 |
+
enable_recording=enable_recording,
|
612 |
+
task=task,
|
613 |
+
add_infos=add_infos,
|
614 |
+
max_steps=max_steps,
|
615 |
+
use_vision=use_vision,
|
616 |
+
max_actions_per_step=max_actions_per_step,
|
617 |
+
tool_calling_method=tool_calling_method,
|
618 |
+
chrome_cdp=chrome_cdp,
|
619 |
+
max_input_tokens=max_input_tokens
|
620 |
+
)
|
621 |
+
)
|
622 |
+
|
623 |
+
# Initialize values for streaming
|
624 |
+
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
|
625 |
+
final_result = errors = model_actions = model_thoughts = ""
|
626 |
+
recording_gif = trace = history_file = None
|
627 |
+
|
628 |
+
# Periodically update the stream while the agent task is running
|
629 |
+
while not agent_task.done():
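# Each iteration grabs a fresh screenshot and yields it to the UI; the 0.1 s sleep at the bottom keeps this to roughly ten updates per second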
|
630 |
+
try:
|
631 |
+
encoded_screenshot = await capture_screenshot(_global_browser_context)
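# capture_screenshot is defined earlier in this file; it is expected to return a base64-encoded screenshot of the current page, or None when no page is available yet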
|
632 |
+
if encoded_screenshot is not None:
|
633 |
+
html_content = f'<img src="data:image/jpeg;base64,{encoded_screenshot}" style="width:{stream_vw}vw; height:{stream_vh}vh ; border:1px solid #ccc;">'
|
634 |
+
else:
|
635 |
+
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
|
636 |
+
except Exception as e:
|
637 |
+
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
|
638 |
+
|
639 |
+
if _global_agent and _global_agent.state.stopped:
|
640 |
+
yield [
|
641 |
+
gr.HTML(value=html_content, visible=True),
|
642 |
+
final_result,
|
643 |
+
errors,
|
644 |
+
model_actions,
|
645 |
+
model_thoughts,
|
646 |
+
recording_gif,
|
647 |
+
trace,
|
648 |
+
history_file,
|
649 |
+
gr.update(value="Stopping...", interactive=False), # stop_button
|
650 |
+
gr.update(interactive=False), # run_button
|
651 |
+
]
|
652 |
+
break
|
653 |
+
else:
|
654 |
+
yield [
|
655 |
+
gr.HTML(value=html_content, visible=True),
|
656 |
+
final_result,
|
657 |
+
errors,
|
658 |
+
model_actions,
|
659 |
+
model_thoughts,
|
660 |
+
recording_gif,
|
661 |
+
trace,
|
662 |
+
history_file,
|
663 |
+
gr.update(), # Re-enable stop button
|
664 |
+
gr.update() # Re-enable run button
|
665 |
+
]
|
666 |
+
await asyncio.sleep(0.1)
|
667 |
+
|
668 |
+
# Once the agent task completes, get the results
|
669 |
+
try:
|
670 |
+
result = await agent_task
|
671 |
+
final_result, errors, model_actions, model_thoughts, recording_gif, trace, history_file, stop_button, run_button = result
|
672 |
+
except gr.Error:
|
673 |
+
final_result = ""
|
674 |
+
model_actions = ""
|
675 |
+
model_thoughts = ""
|
676 |
+
recording_gif = trace = history_file = None
|
677 |
+
|
678 |
+
except Exception as e:
|
679 |
+
errors = f"Agent error: {str(e)}"
|
680 |
+
|
681 |
+
yield [
|
682 |
+
gr.HTML(value=html_content, visible=True),
|
683 |
+
final_result,
|
684 |
+
errors,
|
685 |
+
model_actions,
|
686 |
+
model_thoughts,
|
687 |
+
recording_gif,
|
688 |
+
trace,
|
689 |
+
history_file,
|
690 |
+
stop_button,
|
691 |
+
run_button
|
692 |
+
]
|
693 |
+
|
694 |
+
except Exception as e:
|
695 |
+
import traceback
|
696 |
+
yield [
|
697 |
+
gr.HTML(
|
698 |
+
value=f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>",
|
699 |
+
visible=True),
|
700 |
+
"",
|
701 |
+
f"Error: {str(e)}\n{traceback.format_exc()}",
|
702 |
+
"",
|
703 |
+
"",
|
704 |
+
None,
|
705 |
+
None,
|
706 |
+
None,
|
707 |
+
gr.update(value="Stop", interactive=True), # Re-enable stop button
|
708 |
+
gr.update(interactive=True) # Re-enable run button
|
709 |
+
]
|
710 |
+
|
711 |
+
|
712 |
+
# Define the theme map globally
|
713 |
+
theme_map = {
|
714 |
+
"Default": Default(),
|
715 |
+
"Soft": Soft(),
|
716 |
+
"Monochrome": Monochrome(),
|
717 |
+
"Glass": Glass(),
|
718 |
+
"Origin": Origin(),
|
719 |
+
"Citrus": Citrus(),
|
720 |
+
"Ocean": Ocean(),
|
721 |
+
"Base": Base()
|
722 |
+
}
|
723 |
+
|
724 |
+
|
725 |
+
async def close_global_browser():
|
726 |
+
global _global_browser, _global_browser_context
|
727 |
+
|
728 |
+
if _global_browser_context:
|
729 |
+
await _global_browser_context.close()
|
730 |
+
_global_browser_context = None
|
731 |
+
|
732 |
+
if _global_browser:
|
733 |
+
await _global_browser.close()
|
734 |
+
_global_browser = None
|
735 |
+
|
736 |
+
|
737 |
+
async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider,
|
738 |
+
llm_model_name, llm_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
|
739 |
+
use_own_browser, headless, chrome_cdp):
|
740 |
+
from src.utils.deep_research import deep_research
|
741 |
+
global _global_agent_state
|
742 |
+
|
743 |
+
# Clear any previous stop request
|
744 |
+
_global_agent_state.clear_stop()
|
745 |
+
|
746 |
+
llm = utils.get_llm_model(
|
747 |
+
provider=llm_provider,
|
748 |
+
model_name=llm_model_name,
|
749 |
+
num_ctx=llm_num_ctx,
|
750 |
+
temperature=llm_temperature,
|
751 |
+
base_url=llm_base_url,
|
752 |
+
api_key=llm_api_key,
|
753 |
+
)
|
754 |
+
markdown_content, file_path = await deep_research(research_task, llm, _global_agent_state,
|
755 |
+
max_search_iterations=max_search_iteration_input,
|
756 |
+
max_query_num=max_query_per_iter_input,
|
757 |
+
use_vision=use_vision,
|
758 |
+
headless=headless,
|
759 |
+
use_own_browser=use_own_browser,
|
760 |
+
chrome_cdp=chrome_cdp
|
761 |
+
)
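# deep_research returns the report markdown plus the path of the saved file; both are surfaced in the Deep Research tab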
|
762 |
+
|
763 |
+
return markdown_content, file_path, gr.update(value="Stop", interactive=True), gr.update(interactive=True)
|
764 |
+
|
765 |
+
|
766 |
+
def create_ui(theme_name="Ocean"):
|
767 |
+
css = """
|
768 |
+
.gradio-container {
|
769 |
+
width: 60vw !important;
|
770 |
+
max-width: 60% !important;
|
771 |
+
margin-left: auto !important;
|
772 |
+
margin-right: auto !important;
|
773 |
+
padding-top: 20px !important;
|
774 |
+
}
|
775 |
+
.header-text {
|
776 |
+
text-align: center;
|
777 |
+
margin-bottom: 30px;
|
778 |
+
}
|
779 |
+
.theme-section {
|
780 |
+
margin-bottom: 20px;
|
781 |
+
padding: 15px;
|
782 |
+
border-radius: 10px;
|
783 |
+
}
|
784 |
+
"""
|
785 |
+
|
786 |
+
with gr.Blocks(
|
787 |
+
title="Browser Use WebUI", theme=theme_map[theme_name], css=css
|
788 |
+
) as demo:
|
789 |
+
with gr.Row():
|
790 |
+
gr.Markdown(
|
791 |
+
"""
|
792 |
+
# 🌐 Browser Use WebUI
|
793 |
+
### Control your browser with AI assistance
|
794 |
+
""",
|
795 |
+
elem_classes=["header-text"],
|
796 |
+
)
|
797 |
+
|
798 |
+
with gr.Tabs() as tabs:
|
799 |
+
with gr.TabItem("⚙️ Agent Settings", id=1):
|
800 |
+
with gr.Group():
|
801 |
+
agent_type = gr.Radio(
|
802 |
+
["org", "custom"],
|
803 |
+
label="Agent Type",
|
804 |
+
value="custom",
|
805 |
+
info="Select the type of agent to use",
|
806 |
+
interactive=True
|
807 |
+
)
|
808 |
+
with gr.Column():
|
809 |
+
max_steps = gr.Slider(
|
810 |
+
minimum=1,
|
811 |
+
maximum=200,
|
812 |
+
value=100,
|
813 |
+
step=1,
|
814 |
+
label="Max Run Steps",
|
815 |
+
info="Maximum number of steps the agent will take",
|
816 |
+
interactive=True
|
817 |
+
)
|
818 |
+
max_actions_per_step = gr.Slider(
|
819 |
+
minimum=1,
|
820 |
+
maximum=100,
|
821 |
+
value=10,
|
822 |
+
step=1,
|
823 |
+
label="Max Actions per Step",
|
824 |
+
info="Maximum number of actions the agent will take per step",
|
825 |
+
interactive=True
|
826 |
+
)
|
827 |
+
with gr.Column():
|
828 |
+
use_vision = gr.Checkbox(
|
829 |
+
label="Use Vision",
|
830 |
+
value=True,
|
831 |
+
info="Enable visual processing capabilities",
|
832 |
+
interactive=True
|
833 |
+
)
|
834 |
+
max_input_tokens = gr.Number(
|
835 |
+
label="Max Input Tokens",
|
836 |
+
value=128000,
|
837 |
+
precision=0,
|
838 |
+
interactive=True
|
839 |
+
)
|
840 |
+
tool_calling_method = gr.Dropdown(
|
841 |
+
label="Tool Calling Method",
|
842 |
+
value="auto",
|
843 |
+
interactive=True,
|
844 |
+
allow_custom_value=True,  # Allow users to type a custom tool-calling method
|
845 |
+
choices=["auto", "json_schema", "function_calling"],
|
846 |
+
info="Tool Calls Funtion Name",
|
847 |
+
visible=False
|
848 |
+
)
|
849 |
+
|
850 |
+
with gr.TabItem("🔧 LLM Settings", id=2):
|
851 |
+
with gr.Group():
|
852 |
+
llm_provider = gr.Dropdown(
|
853 |
+
choices=[provider for provider, model in utils.model_names.items()],
|
854 |
+
label="LLM Provider",
|
855 |
+
value="openai",
|
856 |
+
info="Select your preferred language model provider",
|
857 |
+
interactive=True
|
858 |
+
)
|
859 |
+
llm_model_name = gr.Dropdown(
|
860 |
+
label="Model Name",
|
861 |
+
choices=utils.model_names['openai'],
|
862 |
+
value="gpt-4o",
|
863 |
+
interactive=True,
|
864 |
+
allow_custom_value=True, # Allow users to input custom model names
|
865 |
+
info="Select a model in the dropdown options or directly type a custom model name"
|
866 |
+
)
|
867 |
+
ollama_num_ctx = gr.Slider(
|
868 |
+
minimum=2 ** 8,
|
869 |
+
maximum=2 ** 16,
|
870 |
+
value=16000,
|
871 |
+
step=1,
|
872 |
+
label="Ollama Context Length",
|
873 |
+
info="Controls max context length model needs to handle (less = faster)",
|
874 |
+
visible=False,
|
875 |
+
interactive=True
|
876 |
+
)
|
877 |
+
llm_temperature = gr.Slider(
|
878 |
+
minimum=0.0,
|
879 |
+
maximum=2.0,
|
880 |
+
value=0.6,
|
881 |
+
step=0.1,
|
882 |
+
label="Temperature",
|
883 |
+
info="Controls randomness in model outputs",
|
884 |
+
interactive=True
|
885 |
+
)
|
886 |
+
with gr.Row():
|
887 |
+
llm_base_url = gr.Textbox(
|
888 |
+
label="Base URL",
|
889 |
+
value="",
|
890 |
+
info="API endpoint URL (if required)"
|
891 |
+
)
|
892 |
+
llm_api_key = gr.Textbox(
|
893 |
+
label="API Key",
|
894 |
+
type="password",
|
895 |
+
value="",
|
896 |
+
info="Your API key (leave blank to use .env)"
|
897 |
+
)
|
898 |
+
|
899 |
+
# Change event to update context length slider
|
900 |
+
def update_llm_num_ctx_visibility(llm_provider):
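# Only the Ollama provider exposes a configurable context window, so the slider stays hidden for every other provider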
|
901 |
+
return gr.update(visible=llm_provider == "ollama")
|
902 |
+
|
903 |
+
# Bind the change event of llm_provider to update the visibility of context length slider
|
904 |
+
llm_provider.change(
|
905 |
+
fn=update_llm_num_ctx_visibility,
|
906 |
+
inputs=llm_provider,
|
907 |
+
outputs=ollama_num_ctx
|
908 |
+
)
|
909 |
+
|
910 |
+
with gr.TabItem("🌐 Browser Settings", id=3):
|
911 |
+
with gr.Group():
|
912 |
+
with gr.Row():
|
913 |
+
use_own_browser = gr.Checkbox(
|
914 |
+
label="Use Own Browser",
|
915 |
+
value=False,
|
916 |
+
info="Use your existing browser instance",
|
917 |
+
interactive=True
|
918 |
+
)
|
919 |
+
keep_browser_open = gr.Checkbox(
|
920 |
+
label="Keep Browser Open",
|
921 |
+
value=False,
|
922 |
+
info="Keep Browser Open between Tasks",
|
923 |
+
interactive=True
|
924 |
+
)
|
925 |
+
headless = gr.Checkbox(
|
926 |
+
label="Headless Mode",
|
927 |
+
value=False,
|
928 |
+
info="Run browser without GUI",
|
929 |
+
interactive=True
|
930 |
+
)
|
931 |
+
disable_security = gr.Checkbox(
|
932 |
+
label="Disable Security",
|
933 |
+
value=True,
|
934 |
+
info="Disable browser security features",
|
935 |
+
interactive=True
|
936 |
+
)
|
937 |
+
enable_recording = gr.Checkbox(
|
938 |
+
label="Enable Recording",
|
939 |
+
value=True,
|
940 |
+
info="Enable saving browser recordings",
|
941 |
+
interactive=True
|
942 |
+
)
|
943 |
+
|
944 |
+
with gr.Row():
|
945 |
+
window_w = gr.Number(
|
946 |
+
label="Window Width",
|
947 |
+
value=1280,
|
948 |
+
info="Browser window width",
|
949 |
+
interactive=True
|
950 |
+
)
|
951 |
+
window_h = gr.Number(
|
952 |
+
label="Window Height",
|
953 |
+
value=1100,
|
954 |
+
info="Browser window height",
|
955 |
+
interactive=True
|
956 |
+
)
|
957 |
+
|
958 |
+
chrome_cdp = gr.Textbox(
|
959 |
+
label="CDP URL",
|
960 |
+
placeholder="http://localhost:9222",
|
961 |
+
value="",
|
962 |
+
info="CDP for google remote debugging",
|
963 |
+
interactive=True,
|
964 |
+
)
|
965 |
+
|
966 |
+
save_recording_path = gr.Textbox(
|
967 |
+
label="Recording Path",
|
968 |
+
placeholder="e.g. ./tmp/record_videos",
|
969 |
+
value="./tmp/record_videos",
|
970 |
+
info="Path to save browser recordings",
|
971 |
+
interactive=True, # Allow editing only if recording is enabled
|
972 |
+
)
|
973 |
+
|
974 |
+
save_trace_path = gr.Textbox(
|
975 |
+
label="Trace Path",
|
976 |
+
placeholder="e.g. ./tmp/traces",
|
977 |
+
value="./tmp/traces",
|
978 |
+
info="Path to save Agent traces",
|
979 |
+
interactive=True,
|
980 |
+
)
|
981 |
+
|
982 |
+
save_agent_history_path = gr.Textbox(
|
983 |
+
label="Agent History Save Path",
|
984 |
+
placeholder="e.g., ./tmp/agent_history",
|
985 |
+
value="./tmp/agent_history",
|
986 |
+
info="Specify the directory where agent history should be saved.",
|
987 |
+
interactive=True,
|
988 |
+
)
|
989 |
+
|
990 |
+
with gr.TabItem("🤖 Run Agent", id=4):
|
991 |
+
task = gr.Textbox(
|
992 |
+
label="Task Description",
|
993 |
+
lines=4,
|
994 |
+
placeholder="Enter your task here...",
|
995 |
+
value="go to google.com and type 'OpenAI' click search and give me the first url",
|
996 |
+
info="Describe what you want the agent to do",
|
997 |
+
interactive=True
|
998 |
+
)
|
999 |
+
add_infos = gr.Textbox(
|
1000 |
+
label="Additional Information",
|
1001 |
+
lines=3,
|
1002 |
+
placeholder="Add any helpful context or instructions...",
|
1003 |
+
info="Optional hints to help the LLM complete the task",
|
1004 |
+
value="",
|
1005 |
+
interactive=True
|
1006 |
+
)
|
1007 |
+
|
1008 |
+
with gr.Row():
|
1009 |
+
run_button = gr.Button("▶️ Run Agent", variant="primary", scale=2)
|
1010 |
+
stop_button = gr.Button("⏹️ Stop", variant="stop", scale=1)
|
1011 |
+
|
1012 |
+
with gr.Row():
|
1013 |
+
browser_view = gr.HTML(
|
1014 |
+
value="<h1 style='width:80vw; height:50vh'>Waiting for browser session...</h1>",
|
1015 |
+
label="Live Browser View",
|
1016 |
+
visible=False
|
1017 |
+
)
|
1018 |
+
|
1019 |
+
gr.Markdown("### Results")
|
1020 |
+
with gr.Row():
|
1021 |
+
with gr.Column():
|
1022 |
+
final_result_output = gr.Textbox(
|
1023 |
+
label="Final Result", lines=3, show_label=True
|
1024 |
+
)
|
1025 |
+
with gr.Column():
|
1026 |
+
errors_output = gr.Textbox(
|
1027 |
+
label="Errors", lines=3, show_label=True
|
1028 |
+
)
|
1029 |
+
with gr.Row():
|
1030 |
+
with gr.Column():
|
1031 |
+
model_actions_output = gr.Textbox(
|
1032 |
+
label="Model Actions", lines=3, show_label=True, visible=False
|
1033 |
+
)
|
1034 |
+
with gr.Column():
|
1035 |
+
model_thoughts_output = gr.Textbox(
|
1036 |
+
label="Model Thoughts", lines=3, show_label=True, visible=False
|
1037 |
+
)
|
1038 |
+
recording_gif = gr.Image(label="Result GIF", format="gif")
|
1039 |
+
trace_file = gr.File(label="Trace File")
|
1040 |
+
agent_history_file = gr.File(label="Agent History")
|
1041 |
+
|
1042 |
+
with gr.TabItem("🧐 Deep Research", id=5):
|
1043 |
+
research_task_input = gr.Textbox(label="Research Task", lines=5,
|
1044 |
+
value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.",
|
1045 |
+
interactive=True)
|
1046 |
+
with gr.Row():
|
1047 |
+
max_search_iteration_input = gr.Number(label="Max Search Iteration", value=3,
|
1048 |
+
precision=0,
|
1049 |
+
interactive=True)  # precision=0 ensures an integer value
|
1050 |
+
max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=1,
|
1051 |
+
precision=0,
|
1052 |
+
interactive=True)  # precision=0 ensures an integer value
|
1053 |
+
with gr.Row():
|
1054 |
+
research_button = gr.Button("▶️ Run Deep Research", variant="primary", scale=2)
|
1055 |
+
stop_research_button = gr.Button("⏹ Stop", variant="stop", scale=1)
|
1056 |
+
markdown_output_display = gr.Markdown(label="Research Report")
|
1057 |
+
markdown_download = gr.File(label="Download Research Report")
|
1058 |
+
|
1059 |
+
# Bind the stop button click event after errors_output is defined
|
1060 |
+
stop_button.click(
|
1061 |
+
fn=stop_agent,
|
1062 |
+
inputs=[],
|
1063 |
+
outputs=[stop_button, run_button],
|
1064 |
+
)
|
1065 |
+
|
1066 |
+
# Run button click handler
|
1067 |
+
run_button.click(
|
1068 |
+
fn=run_with_stream,
|
1069 |
+
inputs=[
|
1070 |
+
agent_type, llm_provider, llm_model_name, ollama_num_ctx, llm_temperature, llm_base_url,
|
1071 |
+
llm_api_key,
|
1072 |
+
use_own_browser, keep_browser_open, headless, disable_security, window_w, window_h,
|
1073 |
+
save_recording_path, save_agent_history_path, save_trace_path,
|
1074 |
+
enable_recording, task, add_infos, max_steps, use_vision, max_actions_per_step,
|
1075 |
+
tool_calling_method, chrome_cdp, max_input_tokens
|
1076 |
+
],
|
1077 |
+
outputs=[
|
1078 |
+
browser_view, # Browser view
|
1079 |
+
final_result_output, # Final result
|
1080 |
+
errors_output, # Errors
|
1081 |
+
model_actions_output, # Model actions
|
1082 |
+
model_thoughts_output, # Model thoughts
|
1083 |
+
recording_gif, # Latest recording
|
1084 |
+
trace_file, # Trace file
|
1085 |
+
agent_history_file, # Agent history file
|
1086 |
+
stop_button, # Stop button
|
1087 |
+
run_button # Run button
|
1088 |
+
],
|
1089 |
+
)
|
1090 |
+
|
1091 |
+
# Run Deep Research
|
1092 |
+
research_button.click(
|
1093 |
+
fn=run_deep_search,
|
1094 |
+
inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider,
|
1095 |
+
llm_model_name, ollama_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
|
1096 |
+
use_own_browser, headless, chrome_cdp],
|
1097 |
+
outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
|
1098 |
+
)
|
1099 |
+
# Bind the Deep Research stop button click event
|
1100 |
+
stop_research_button.click(
|
1101 |
+
fn=stop_research_agent,
|
1102 |
+
inputs=[],
|
1103 |
+
outputs=[stop_research_button, research_button],
|
1104 |
+
)
|
1105 |
+
|
1106 |
+
with gr.TabItem("🎥 Recordings", id=7, visible=True):
|
1107 |
+
def list_recordings(save_recording_path):
|
1108 |
+
if not os.path.exists(save_recording_path):
|
1109 |
+
return []
|
1110 |
+
|
1111 |
+
# Get all video files
|
1112 |
+
recordings = glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4")) + glob.glob(
|
1113 |
+
os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
|
1114 |
+
|
1115 |
+
# Sort recordings by creation time (oldest first)
|
1116 |
+
recordings.sort(key=os.path.getctime)
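# os.path.getctime is creation time on Windows but metadata-change time on Unix; either is close enough for ordering recordings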
|
1117 |
+
|
1118 |
+
# Add numbering to the recordings
|
1119 |
+
numbered_recordings = []
|
1120 |
+
for idx, recording in enumerate(recordings, start=1):
|
1121 |
+
filename = os.path.basename(recording)
|
1122 |
+
numbered_recordings.append((recording, f"{idx}. {filename}"))
|
1123 |
+
|
1124 |
+
return numbered_recordings
|
1125 |
+
|
1126 |
+
recordings_gallery = gr.Gallery(
|
1127 |
+
label="Recordings",
|
1128 |
+
columns=3,
|
1129 |
+
height="auto",
|
1130 |
+
object_fit="contain"
|
1131 |
+
)
|
1132 |
+
|
1133 |
+
refresh_button = gr.Button("🔄 Refresh Recordings", variant="secondary")
|
1134 |
+
refresh_button.click(
|
1135 |
+
fn=list_recordings,
|
1136 |
+
inputs=save_recording_path,
|
1137 |
+
outputs=recordings_gallery
|
1138 |
+
)
|
1139 |
+
|
1140 |
+
with gr.TabItem("📁 UI Configuration", id=8):
|
1141 |
+
config_file_input = gr.File(
|
1142 |
+
label="Load UI Settings from Config File",
|
1143 |
+
file_types=[".json"],
|
1144 |
+
interactive=True
|
1145 |
+
)
|
1146 |
+
with gr.Row():
|
1147 |
+
load_config_button = gr.Button("Load Config", variant="primary")
|
1148 |
+
save_config_button = gr.Button("Save UI Settings", variant="primary")
|
1149 |
+
|
1150 |
+
config_status = gr.Textbox(
|
1151 |
+
label="Status",
|
1152 |
+
lines=2,
|
1153 |
+
interactive=False
|
1154 |
+
)
|
1155 |
+
save_config_button.click(
|
1156 |
+
fn=save_current_config,
|
1157 |
+
inputs=[],  # No input parameters needed
|
1158 |
+
outputs=[config_status]
|
1159 |
+
)
|
1160 |
+
|
1161 |
+
# Attach the callback to the LLM provider dropdown
|
1162 |
+
llm_provider.change(
|
1163 |
+
lambda provider, api_key, base_url: update_model_dropdown(provider, api_key, base_url),
|
1164 |
+
inputs=[llm_provider, llm_api_key, llm_base_url],
|
1165 |
+
outputs=llm_model_name
|
1166 |
+
)
|
1167 |
+
|
1168 |
+
# Enable editing of the recording path only while recording is enabled
|
1169 |
+
enable_recording.change(
|
1170 |
+
lambda enabled: gr.update(interactive=enabled),
|
1171 |
+
inputs=enable_recording,
|
1172 |
+
outputs=save_recording_path
|
1173 |
+
)
|
1174 |
+
|
1175 |
+
use_own_browser.change(fn=close_global_browser)
|
1176 |
+
keep_browser_open.change(fn=close_global_browser)
|
1177 |
+
|
1178 |
+
scan_and_register_components(demo)
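# scan_and_register_components is defined earlier in this file; presumably it walks the Blocks tree and registers every setting component with webui_config_manager so the Load/Save buttons can round-trip UI state as JSON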
|
1179 |
+
global webui_config_manager
|
1180 |
+
all_components = webui_config_manager.get_all_components()
|
1181 |
+
|
1182 |
+
load_config_button.click(
|
1183 |
+
fn=update_ui_from_config,
|
1184 |
+
inputs=[config_file_input],
|
1185 |
+
outputs=all_components + [config_status]
|
1186 |
+
)
|
1187 |
+
return demo
|
1188 |
+
|
1189 |
+
|
1190 |
+
def main():
|
1191 |
+
parser = argparse.ArgumentParser(description="Gradio UI for Browser Agent")
|
1192 |
+
parser.add_argument("--ip", type=str, default="127.0.0.1", help="IP address to bind to")
|
1193 |
+
parser.add_argument("--port", type=int, default=7788, help="Port to listen on")
|
1194 |
+
parser.add_argument("--theme", type=str, default="Ocean", choices=theme_map.keys(), help="Theme to use for the UI")
|
1195 |
+
args = parser.parse_args()
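# Example: python webui.py --theme Ocean
# (the launch call below binds to 0.0.0.0 and reads its port from the PORT env var, so --ip and --port are effectively ignored in this deployment)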
|
1196 |
+
|
1197 |
+
demo = create_ui(theme_name=args.theme)
|
1198 |
+
demo.launch(server_name="0.0.0.0",share=True, server_port=int(os.environ.get("PORT", 80)))
|
1199 |
+
|
1200 |
+
|
1201 |
+
|
1202 |
+
if __name__ == '__main__':
|
1203 |
+
main()
|