---
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required).
# Quoted so every YAML parser treats it as a string, never a number.
version: '1.2.1'

# Cache settings: set to true to enable caching
cache: true
# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true
  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Terms of Service for LibreChat"
    # Literal block scalar (|): newlines are preserved; the first line is
    # rendered as a Markdown heading in the acceptance modal.
    modalContent: |
      # Terms and Conditions for LibreChat
      Please do not use chatgpt , since librechat is better 😀
      Regards , Vaibhav
  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true
# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
  # Uncomment to restrict registration to specific email domains:
  # allowedDomains:
  #   - "gmail.com"
# speech: | |
# tts: | |
# openai: | |
# url: '' | |
# apiKey: '${TTS_API_KEY}' | |
# model: '' | |
# voices: [''] | |
# | |
# stt: | |
# openai: | |
# url: '' | |
# apiKey: '${STT_API_KEY}' | |
# model: '' | |
# rateLimits: | |
# fileUploads: | |
# ipMax: 100 | |
# ipWindowInMinutes: 60 # Rate limit window for file uploads per IP | |
# userMax: 50 | |
# userWindowInMinutes: 60 # Rate limit window for file uploads per user | |
# conversationsImport: | |
# ipMax: 100 | |
# ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP | |
# userMax: 50 | |
# userWindowInMinutes: 60 # Rate limit window for conversation imports per user | |
# Example Actions Object Structure
actions:
  # Entries are hostnames only (no scheme); the scheme-prefixed duplicate
  # "https://api.e2b.dev" was removed in favor of the bare domain below.
  allowedDomains:
    - "swapi.dev"
    - "librechat.ai"
    - "google.com"
    - "api.e2b.dev"
# Example MCP Servers Object Structure
# SECURITY NOTE(review): the API keys previously hard-coded in this section
# (and in the commented-out examples) were committed to version control.
# They should be revoked/rotated and supplied via environment variables,
# which librechat.yaml interpolates with the ${VAR_NAME} syntax.
mcpServers:
  # everything:
  #   type: sse  # type can optionally be omitted
  #   url: https://787d-182-69-182-121.ngrok-free.app/
  # memory:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@modelcontextprotocol/server-memory"
  #   timeout: 60000000
  # apify:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@apify/actors-mcp-server"
  #     - --actors
  #     - apify/screenshot-url,apify/website-content-crawler,apify/puppeteer-scraper,apify/rag-web-browser,jancurn/screenshot-taker,apify/cheerio-scraper,apify/playwright-scraper,apify/ai-web-agent,marco.gullo/page-printer,dz_omar/example-website-screenshot-crawler,apify/legacy-phantomjs-crawler,lukaskrivka/article-extractor-smart
  #   timeout: 60000000
  #   env:
  #     APIFY_TOKEN: "${APIFY_TOKEN}"  # never commit the real token
  #     PATH: "/usr/local/bin:/usr/bin:/bin"
  #     NODE_PATH: "/usr/local/lib/node_modules"
  # env:
  #   E2B_API_KEY: "${E2B_API_KEY}"  # never commit the real key
  #   PATH: "/usr/local/bin:/usr/bin:/bin"
  #   NODE_PATH: "/usr/local/lib/node_modules"
  exa:
    type: stdio
    command: npx
    args:
      - -y
      - "/app/exa-mcp-server/build/index.js"
      # - --HF_TOKEN=${HF_TOKEN}
    timeout: 60000000
    env:
      # Rotate the previously committed key; read it from the environment.
      EXA_API_KEY: "${EXA_API_KEY}"
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  arxiv:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_arxiv
    timeout: 60000000
  pubmed:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_pubmed
    env:
      PUBMED_EMAIL: "[email protected]"
      # Fixed: removed the space before ':' (was `PUBMED_API_KEY :`), and
      # replaced the committed key with an environment reference.
      PUBMED_API_KEY: "${PUBMED_API_KEY}"
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  memory:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-memory"
    timeout: 60000000
  codesandbox:
    type: stdio
    command: python
    args:
      - tests.py
    timeout: 60000000
# Definition of custom endpoints
endpoints:
  agents:
    recursionLimit: 50
    disableBuilder: false
    capabilities:
      # - "execute_code"
      - "file_search"
      - "actions"
      - "tools"
  custom:
    # together.ai
    # https://api.together.ai/settings/api-keys
    - name: 'Tiny-DEV'  # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # SECURITY NOTE(review): the key previously hard-coded here was committed
      # to version control; rotate it and supply it via the environment.
      apiKey: '${TINY_DEV_API_KEY}'
      baseURL: 'https://13e4-182-69-178-15.ngrok-free.app/'
      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default:
          - 'gemini-2.0-flash-thinking-exp-01-21'
          - 'gemini-2.5-pro-exp-03-25'
          - 'deepseek.r1'
          - 'deepseek-reasoner'
          - 'deepseek-chat'
          - 'gemini-2.0-pro-exp-02-05'
          - 'deepseek-r1-distill-llama-70b'
          - 'qwq-32b'
          - 'llama-3.3-70b-versatile'
        # Fetch option: set to true to fetch models from API.
        # fetch: false  # Defaults to false.
      # Optional configurations
      # Title Conversation setting
      titleConvo: true  # Set to true to enable title conversation
      modelDisplayLabel: 'Tiny'  # Default is "AI" when not set.
    # # Mistral AI Example
    # - name: 'Mistral'  # Unique name for the endpoint
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   apiKey: '${MISTRAL_API_KEY}'
    #   baseURL: 'https://api.mistral.ai/v1'
    #   # Models configuration
    #   models:
    #     # List of default models to use. At least one value is required.
    #     default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
    #     # Fetch option: set to true to fetch models from API.
    #     fetch: true  # Defaults to false.
    #   # Optional configurations
    #   # Title Conversation setting
    #   titleConvo: true  # Set to true to enable title conversation
    #   # Title Method: choose between "completion" or "functions".
    #   # titleMethod: "completion"  # Defaults to "completion" if omitted.
    #   # Title Model: specify the model to use for titles.
    #   titleModel: 'mistral-tiny'  # Defaults to "gpt-3.5-turbo" if omitted.
    #   # Summarize setting: set to true to enable summarization.
    #   # summarize: false
    #   # Summary Model: specify the model to use if summarization is enabled.
    #   # summaryModel: "mistral-tiny"  # Defaults to "gpt-3.5-turbo" if omitted.
    #   # Force Prompt setting: if true, sends a `prompt` parameter instead of `messages`.
    #   # forcePrompt: false
    #   # The label displayed for the AI model in messages.
    #   modelDisplayLabel: 'Mistral'  # Default is "AI" when not set.
    #   # Add additional parameters to the request. Default params will be overwritten.
    #   # addParams:
    #   #   safe_prompt: true  # This field is specific to Mistral AI: https://docs.mistral.ai/api/
    #   # Drop default parameters from the request. See default params in guide linked below.
    #   # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
    #   dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
    # # OpenRouter Example
    # - name: 'OpenRouter'
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
    #   apiKey: '${OPENROUTER_KEY}'
    #   baseURL: 'https://openrouter.ai/api/v1'
    #   models:
    #     default: ['meta-llama/llama-3-70b-instruct']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'meta-llama/llama-3-70b-instruct'
    #   # Recommended: drop the stop parameter from the request as OpenRouter models use a variety of stop tokens.
    #   dropParams: ['stop']
    #   modelDisplayLabel: 'OpenRouter'
    # # together.ai
    # # https://api.together.ai/settings/api-keys
    # # Model list: https://docs.together.ai/docs/inference-models
    # # Portkey AI Example
    # - name: "Portkey"
    #   apiKey: "dummy"
    #   baseURL: 'https://api.portkey.ai/v1'
    #   headers:
    #     x-portkey-api-key: '${PORTKEY_API_KEY}'
    #     x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
    #   models:
    #     default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'current_model'
    #   summarize: false
    #   summaryModel: 'current_model'
    #   forcePrompt: false
    #   modelDisplayLabel: 'Portkey'
    #   iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
fileConfig:
  endpoints:
    agents:
      fileLimit: 5
      fileSizeLimit: 100  # Maximum size for an individual file in MB
      totalSizeLimit: 500  # Maximum total size for all files in a single request in MB
      # Patterns are regular expressions matched against the upload's MIME type.
      supportedMimeTypes:
        - "image/.*"
        - "application/pdf"
        - "video/.*"
        - "application/vnd.ms-excel"
        - "audio/mp3"
        - "audio/mpeg"
        - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        - "application/msword"
    # openAI:
    #   disabled: true  # Disables file uploading to the OpenAI endpoint
    # default:
    #   totalSizeLimit: 20
    # YourCustomEndpointName:
    #   fileLimit: 2
    #   fileSizeLimit: 5
  # serverFileSizeLimit: 100  # Global server file size limit in MB
  # avatarSizeLimit: 2  # Limit for user avatar image size in MB

# See the Custom Configuration Guide for more information on Assistants Config:
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint