Update librechat.yaml
librechat.yaml (+32 -29)
@@ -77,15 +77,15 @@ modelSpecs:
         model: "o3-mini"
         max_tokens: 100000
         reasoning_effort: "high"
-    - name: "o1"
-      label: "o1 - 8*"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/openai.svg"
-      preset:
-        modelLabel: "o1"
-        endpoint: "Github Models"
-        model: "o1"
-        max_tokens: 40000
-        reasoning_effort: "high"
+    # - name: "o1"
+    # label: "o1 - 8*"
+    # iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/openai.svg"
+    # preset:
+    # modelLabel: "o1"
+    # endpoint: "Github Models"
+    # model: "o1"
+    # max_tokens: 40000
+    # reasoning_effort: "high"
     # - name: "deepseek-r1"
     # label: "DeepSeek R1 - 8"
     # iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/deepseek.svg"
@@ -109,16 +109,16 @@ modelSpecs:
     # endpoint: "HuggingFace"
     # model: "accounts/fireworks/models/deepseek-r1"
     # max_tokens: 500
-    - name: "qwq-32b"
-      label: "QwQ-32B - 1000"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/qwen.png"
-      preset:
-        modelLabel: "QwQ-32B"
-        endpoint: "groq"
-        model: "qwen-qwq-32b"
-        temperature: 0.6
-        max_tokens: 4096
-        top_p: 0.95
+    #- name: "qwq-32b"
+    # label: "QwQ-32B - 1000"
+    # iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/qwen.png"
+    # preset:
+    # modelLabel: "QwQ-32B"
+    # endpoint: "groq"
+    # model: "qwen-qwq-32b"
+    # temperature: 0.6
+    # max_tokens: 4096
+    # top_p: 0.95
     - name: "Mistral OCR"
       label: "Mistral OCR"
       iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/mistral.png"
@@ -128,6 +128,9 @@ modelSpecs:
 
 endpoints:
   agents:
+    allowedProviders:
+      - "google"
+      - "OpenRouter"
     capabilities:
       - "ocr"
 
@@ -141,7 +144,7 @@ endpoints:
       models:
         default:
           - o3-mini
-          - o1
+          # - o1
           # - deepseek-r1
           # - deepseek-v3
           - gpt-4o-mini
@@ -166,15 +169,15 @@ endpoints:
 
     # groq
     # Model list: https://console.groq.com/settings/limits
-    - name: "groq"
-      apiKey: "${GROQ_KEY}"
-      baseURL: "https://api.groq.com/openai/v1"
-      models:
-        default:
-          - qwen-qwq-32b
-          - gemma2-9b-it
-      titleConvo: true
-      titleModel: "gemma2-9b-it"
+    # - name: "groq"
+    # apiKey: "${GROQ_KEY}"
+    # baseURL: "https://api.groq.com/openai/v1"
+    # models:
+    # default:
+    # - qwen-qwq-32b
+    # - gemma2-9b-it
+    # titleConvo: true
+    # titleModel: "gemma2-9b-it"
 
     # HuggingFace
     # https://huggingface.co/settings/tokens
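For reference, the only lines this commit enables (everything else is commented out) are the allowedProviders entries under the agents endpoint. A minimal sketch of how that block reads after the change, assuming LibreChat's usual librechat.yaml nesting; as I understand the agents config, allowedProviders limits which providers can back an agent, here "google" and "OpenRouter":

endpoints:
  agents:
    # Restrict agent providers to the two values added in this commit.
    # (Nesting/indentation assumed from the standard librechat.yaml layout;
    # other keys of the agents block are unchanged.)
    allowedProviders:
      - "google"
      - "OpenRouter"
    capabilities:
      - "ocr"

The rest of the commit only comments blocks out: the o1 model spec, the qwq-32b model spec, the "- o1" entry in the default model list, and the groq custom endpoint can each be restored later by removing the leading "#" characters.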