Add run script to use Groq provider (#295)
- owl/.env_template +4 -0
- owl/run_groq.py +158 -0
owl/.env_template
CHANGED
@@ -7,6 +7,10 @@
 OPENAI_API_KEY='Your_Key'
 # OPENAI_API_BASE_URL=""

+# When using GROQ remember to set OPENAI_API_BASE_URL to https://api.groq.com/openai/v1 to use the groq model according to https://console.groq.com/docs/openai
+# and set OPENAI_API_KEY equal to GROQ_API_KEY
+# GROQ_API_KEY=""
+
 # Azure OpenAI API
 # AZURE_OPENAI_BASE_URL=""
 # AZURE_API_VERSION=""
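For reference, a .env configured for Groq along the lines of the comments above would look roughly like this (key values are placeholders):

# Example .env values for Groq (sketch; keys are placeholders)
OPENAI_API_BASE_URL="https://api.groq.com/openai/v1"
GROQ_API_KEY="Your_Groq_Key"
OPENAI_API_KEY="Your_Groq_Key"  # set equal to GROQ_API_KEY, per the note above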
owl/run_groq.py
ADDED
@@ -0,0 +1,158 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

"""
This module provides integration with the Groq API platform for the OWL system.

It configures different agent roles with appropriate Groq models based on their requirements:
- Tool-intensive roles (assistant, web, planning, video, image) use GROQ_LLAMA_3_3_70B
- Document processing uses GROQ_MIXTRAL_8_7B
- Simple roles (user) use GROQ_LLAMA_3_1_8B

To use this module:
1. Set GROQ_API_KEY in your .env file
2. Set OPENAI_API_BASE_URL to "https://api.groq.com/openai/v1"
3. Run with: python -m owl.run_groq
"""

from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
    AudioAnalysisToolkit,
    CodeExecutionToolkit,
    ExcelToolkit,
    ImageAnalysisToolkit,
    SearchToolkit,
    VideoAnalysisToolkit,
    BrowserToolkit,
    FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level

from utils import OwlRolePlaying, run_society, DocumentProcessingToolkit

load_dotenv()

set_log_level(level="DEBUG")


def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_1_8B,  # Simple role, can use 8B model
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_3_70B,  # Main assistant needs tool capability
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_3_70B,  # Web browsing requires tool usage
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_3_70B,  # Planning requires complex reasoning
            model_config_dict={"temperature": 0},
        ),
        "video": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_3_70B,  # Video analysis is multimodal
            model_config_dict={"temperature": 0},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_3_70B,  # Image analysis is multimodal
            model_config_dict={"temperature": 0},
        ),
        "document": ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_MIXTRAL_8_7B,  # Document processing can use Mixtral
            model_config_dict={"temperature": 0},
        ),
    }

    # Configure toolkits
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *AudioAnalysisToolkit().get_tools(),  # This requires OpenAI Key
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Construct and run the society
    # Note: This configuration uses GROQ_LLAMA_3_3_70B for tool-intensive roles (assistant, web, planning, video, image)
    # and GROQ_MIXTRAL_8_7B for document processing. GROQ_LLAMA_3_1_8B is used only for the user role,
    # which doesn't require tool usage capabilities.
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()
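As noted in the module docstring, the script is intended to be run with python -m owl.run_groq. The snippet below is a minimal usage sketch for reusing the new helpers with a custom task; the import path assumes execution from inside the owl/ directory (matching the script's own "from utils import ..." style), and the question string is purely illustrative.

# Minimal usage sketch (assumes the owl/ working directory and a .env configured for Groq
# as described in the template change above).
from run_groq import construct_society
from utils import run_society

# Illustrative task; any natural-language question works here.
society = construct_society("List three popular mechanical keyboards and their approximate prices.")
answer, chat_history, token_count = run_society(society)
print(answer)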