---
# Serving configuration for the Manticore-13B chat model, backed by a
# RunPod serverless endpoint.
model_url: https://huggingface.co/openaccess-ai-collective/manticore-13b-chat-pyg

# Typewriter-style output pacing.
typer:
  delay: 0.1  # delay between emitted chunks — presumably seconds; confirm against consumer

# RunPod serverless backend settings.
runpod:
  endpoint_id: jifr1oczbrmr3n
  prefer_async: true

# Generation parameters forwarded to the LLM backend.
# Explicit null = unset here; the backend's own default is used.
llm:
  top_k: null
  top_p: null
  temperature: null
  repetition_penalty: null
  last_n_tokens: null
  seed: -1        # NOTE(review): -1 presumably means "random seed" — confirm backend semantics
  batch_size: 8
  threads: -1     # NOTE(review): -1 presumably means "auto-detect" — confirm backend semantics
  stop:
    - ""

# Inbound request queue.
queue:
  max_size: 16
  concurrency_count: 1  # recommend setting this no larger than your current