Update librechat.yaml
librechat.yaml  CHANGED  (+25 -37)
@@ -14,7 +14,7 @@ modelSpecs:
     - name: "llama-4-maverick"
       label: "Llama 4 Maverick"
       default: true
-      iconURL: "https://
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/meta.svg"
       preset:
         modelLabel: "Llama 4 Maverick"
         endpoint: "groq"
@@ -24,7 +24,7 @@ modelSpecs:
         top_p: 1
     - name: "gemini-2.5-pro-exp-03-25"
       label: "Gemini 2.5 Pro (experimental)"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/google.svg"
       preset:
         modelLabel: "Gemini 2.5 Pro"
         endpoint: "google"
@@ -35,36 +35,46 @@ modelSpecs:
         maxOutputTokens: 65536
     - name: "Gemini 2.5 Pro (experimental)"
       label: "Gemini 2.5 Pro (experimental)"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/mistral.png"
       preset:
         endpoint: "agents"
         agent_id: "agent_o-J94iEVBVZARI44PTpuH"
     - name: "gemini-2.5-pro-exp-03-25-openrouter"
       label: "Gemini 2.5 Pro (experimental)"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openrouter.png"
       preset:
         modelLabel: "Gemini 2.5 Pro"
         endpoint: "OpenRouter"
         model: "google/gemini-2.5-pro-exp-03-25:free"
-    - name: "o3-mini"
-      label: "o3-mini"
-      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    - name: "o3-mini-high"
+      label: "o3-mini-high"
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openai.svg"
       preset:
-        modelLabel: "o3-mini"
+        modelLabel: "o3-mini-high"
         endpoint: "Github Models"
         model: "o3-mini"
         max_tokens: 100000
         reasoning_effort: "high"
+    - name: "gpt-4.1"
+      label: "GPT 4.1"
+      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openai.svg"
+      preset:
+        modelLabel: "GPT 4.1"
+        endpoint: "openAI"
+        model: "gpt-4.1"
+        temperature: 1
+        max_tokens: 2048
+        top_p: 1
     # - name: "grok-3-reasoning"
     #   label: "Grok 3 Think - 10"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/xai.svg"
     #   preset:
     #     modelLabel: "Grok 3 Think"
     #     endpoint: "grok"
     #     model: "grok-3-reasoning"
     # - name: "deepseek-v3-0324"
     #   label: "DeepSeek V3 - 50*"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/deepseek.svg"
     #   preset:
     #     modelLabel: "DeepSeek V3"
     #     endpoint: "Github Models"
@@ -74,17 +84,9 @@ modelSpecs:
     #     top_p: 0.1
     #     presence_penalty: 0
     #     frequency_penalty: 0
-    # - name: "deepseek-v3-fireworks"
-    #   label: "DeepSeek V3 - 2$"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/Nothing/librechat/deepseek.svg"
-    #   preset:
-    #     modelLabel: "DeepSeek V3"
-    #     endpoint: "HuggingFace"
-    #     model: "accounts/fireworks/models/deepseek-v3-0324"
-    #     max_tokens: 500
     # - name: "gemini-2.0-flash"
     #   label: "Gemini 2.0 Flash - 1500*"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/gemini.svg"
    #   preset:
     #     modelLabel: "Gemini 2.0 Flash"
     #     endpoint: "google"
@@ -95,7 +97,7 @@ modelSpecs:
     #     maxOutputTokens: 8192
     # - name: "o1"
     #   label: "o1 - 8*"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openai.svg"
     #   preset:
     #     modelLabel: "o1"
     #     endpoint: "Github Models"
@@ -104,7 +106,7 @@ modelSpecs:
     #     reasoning_effort: "high"
     # - name: "deepseek-r1"
     #   label: "DeepSeek R1 - 8"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/deepseek.svg"
     #   preset:
     #     modelLabel: "DeepSeek R1"
     #     endpoint: "Github Models"
@@ -112,7 +114,7 @@ modelSpecs:
     #     max_tokens: 2048
     # - name: "qwq-32b"
     #   label: "QwQ-32B - 1000"
-    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/
+    #   iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/qwen.png"
     #   preset:
     #     modelLabel: "QwQ-32B"
     #     endpoint: "groq"
@@ -170,15 +172,6 @@ endpoints:
           - llama-3.2-1b-preview
       titleConvo: true
      titleModel: "llama-3.2-1b-preview"
-
-    # HuggingFace
-    # https://huggingface.co/settings/tokens
-    # - name: "HuggingFace"
-    #   apiKey: "${HUGGINGFACE_KEY}"
-    #   baseURL: "https://router.huggingface.co/fireworks-ai/v1"
-    #   models:
-    #     default:
-    #       - accounts/fireworks/models/deepseek-v3-0324
 
     # Grok
     # - name: "grok"
@@ -207,9 +200,4 @@ speech:
       url: "https://api.groq.com/openai/v1/audio/speech"
       apiKey: "${TTS_API_KEY}"
       model: "playai-tts"
-      voices: ["Celeste-PlayAI"]
-
-      # url: "https://guru-25-tts.hf.space/v1/audio/speech"
-      # apiKey: "${TTS_API_KEY}"
-      # model: "tts-1-hd"
-      # voices: ["en-US-AvaNeural", "en-US-AvaMultilingualNeural"]
+      voices: ["Celeste-PlayAI"]
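
For readability, here is how the two active specs touched by this change read once the diff is applied, assembled as a sketch from the added lines above: the o3-mini spec becomes o3-mini-high and a new gpt-4.1 spec is added. The enclosing modelSpecs/list keys and the exact indentation are assumed from the usual librechat.yaml layout; they are not visible in the diff itself.

# sketch of the resulting librechat.yaml entries (nesting assumed, values from the diff)
modelSpecs:
  list:
    - name: "o3-mini-high"
      label: "o3-mini-high"
      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openai.svg"
      preset:
        modelLabel: "o3-mini-high"
        endpoint: "Github Models"
        model: "o3-mini"
        max_tokens: 100000
        reasoning_effort: "high"
    - name: "gpt-4.1"
      label: "GPT 4.1"
      iconURL: "https://cdn.jsdelivr.net/gh/Guru-25/LibreChat/openai.svg"
      preset:
        modelLabel: "GPT 4.1"
        endpoint: "openAI"
        model: "gpt-4.1"
        temperature: 1
        max_tokens: 2048
        top_p: 1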