File size: 1,990 Bytes
44504f7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#================================================================================================
#                                       Config of the LLMs
#================================================================================================
conv_model: "gpt-4o-mini" # the conversation model
programmer_model: "gpt-4o-mini" # the code-generation model
inspector_model: "gpt-4o-mini" # the error-checking / debugging model
api_key: "" # API key for the OpenAI-compatible endpoint — keep empty in VCS, inject at runtime
base_url_conv_model: 'https://api.openai.com/v1'
base_url_programmer: 'https://api.openai.com/v1'
base_url_inspector: 'https://api.openai.com/v1' # fixed scheme typo: "htts://" -> "https://"
max_token_conv_model: 4096 # the max token of the conversation model, this will determine the maximum length of the report.

# conv_model: "gemini-1.5-flash" # the conversation model for high-quality responses
# programmer_model: "gemini-1.5-flash" # model for code-related tasks
# inspector_model: "gemini-1.5-flash" # model for error-checking and debugging
# api_key: "YOUR_GEMINI_API_KEY" # replace with your API key — never commit a real key to VCS
# base_url_conv_model: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# base_url_programmer: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# base_url_inspector: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash'
# max_token_conv_model: 4096 # the maximum token limit for generating comprehensive reports


#================================================================================================
#                                       Config of the system
#================================================================================================
streaming: true # lowercase boolean — canonical YAML 1.2 form, same parsed value as "True"

# cache related
oss_endpoint: ""
oss_access_key_id: ""
oss_access_secret: "" # secret — keep empty in VCS, inject at runtime
oss_bucket_name: ""
expired_time: 36000 # The expired time of the link in cache
cache_dir: "" # local cache dir
max_attempts: 5 # The max attempts of self-correcting
max_exe_time: 18000 # max time for the execution

# knowledge integration
retrieval: false # lowercase boolean — canonical YAML 1.2 form, same parsed value as "False"
mode: "full"