import os

from dotenv import load_dotenv

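# Maximum number of completion tokens requested from every model below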
MAX_TOKENS = 5
# Load API keys and other environment variables from a local .env file
load_dotenv()

# Define the models and their configurations
models = [
    {
        "name": "DEEPSEEK",
        "config": {
            "apiKey": os.getenv("DEEPSEEK_API_KEY"),
            "baseURL": "https://api.deepseek.com",
            "model": "deepseek-chat",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "GPT-3.5-Turbo",
        "config": {
            "apiKey": os.getenv("OPENAI_API_KEY"),
            "baseURL": "https://api.openai.com/v1",
            "model": "gpt-3.5-turbo",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "Kimi-Chat",
        "config": {
            "apiKey": os.getenv("MOONSHOT_API_KEY"),
            "baseURL": "https://api.moonshot.cn/v1",
            "model": "moonshot-v1-8k",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "GPT-4o",
        "config": {
            "apiKey": os.getenv("OPENAI_API_KEY"),
            "baseURL": "https://api.openai.com/v1",
            "model": "gpt-4o",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "GPT-4o-mini",
        "config": {
            "apiKey": os.getenv("OPENAI_API_KEY"),
            "baseURL": "https://api.openai.com/v1",
            "model": "gpt-4o-mini",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "Llama-3.1-405b",
        "config": {
            "apiKey": os.getenv("TOGETHER_API_KEY"),
            "model": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1,
            "repetition_penalty": 1,
            "stop": ["<|eot_id|>"]
        },
        "type": "together"
    },
    {
        "name": "Llama3.1-70b",
        "config": {
            "apiKey": os.getenv("TOGETHER_API_KEY"),
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1,
            "repetition_penalty": 1,
            "stop": ["<|eot_id|>"]
        },
        "type": "together"
    },
    {
        "name": "Qwen2-72B-Instruct",
        "config": {
            "apiKey": os.getenv("TOGETHER_API_KEY"),
            "model": "Qwen/Qwen2-72B-Instruct",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1,
            "repetition_penalty": 1,
            "stop": ["<|im_start|>", "<|im_end|>"]
        },
        "type": "together"
    },
    {
        "name": "Doubao-4k",
        "config": {
            "apiKey": os.getenv("DOUBAO_API_KEY"),
            "baseURL": "https://ark.cn-beijing.volces.com/api/v3",
            "model": "ep-20240802142948-6vvc7",  # Replace with the actual endpoint ID if different
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
            "top_p": 1
        },
        "type": "openai"
    },
    {
        "name": "Claude-3.5-Sonnet",
        "config": {
            "apiKey": os.getenv("ANTHROPIC_API_KEY"),
            "model": "claude-3-5-sonnet-20240620",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.0,
        },
        "type": "anthropic"
    },
    {
        "name": "MiniMax-ABAB6.5s",
        "config": {
            "groupId": os.getenv("MINIMAX_GROUP_ID"),
            "apiKey": os.getenv("MINIMAX_API_KEY"),
            "model": "abab6.5s-chat",
            "maxTokens": MAX_TOKENS,
            "temperature": 0.01,  # must be (0, 1]
            "top_p": 1
        },
        "type": "minimax"
    },
]
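

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original configuration).
# Assumes the official `openai` (>=1.x) and `anthropic` Python SDKs; the
# helper name `ask` and the dispatch-by-"type" logic are assumptions made
# here for demonstration, not an existing interface in this project. The
# "together" and "minimax" types are omitted because their client setup is
# not covered by these two SDKs.
def ask(entry: dict, prompt: str) -> str:
    """Send `prompt` to the model described by `entry` and return its reply."""
    cfg = entry["config"]
    if entry["type"] == "openai":
        # Imported lazily so this module stays importable without the SDK installed.
        from openai import OpenAI

        client = OpenAI(api_key=cfg["apiKey"], base_url=cfg["baseURL"])
        resp = client.chat.completions.create(
            model=cfg["model"],
            messages=[{"role": "user", "content": prompt}],
            max_tokens=cfg["maxTokens"],
            temperature=cfg["temperature"],
            top_p=cfg["top_p"],
        )
        return resp.choices[0].message.content
    if entry["type"] == "anthropic":
        from anthropic import Anthropic

        client = Anthropic(api_key=cfg["apiKey"])
        resp = client.messages.create(
            model=cfg["model"],
            max_tokens=cfg["maxTokens"],
            temperature=cfg["temperature"],
            messages=[{"role": "user", "content": prompt}],
        )
        return resp.content[0].text
    raise NotImplementedError(f"No client sketched for model type {entry['type']!r}")


if __name__ == "__main__":
    # Example: query GPT-3.5-Turbo (models[1]) with a trivial prompt.
    print(ask(models[1], "Reply with a single word: ping"))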