Commit ea1580e
Chandima Prabhath committed
Parent(s): 5f26e9c

Refactor LLM settings to use hardcoded defaults for model and max tokens; remove reliance on config.yaml for these parameters.
polLLM.py CHANGED
```diff
@@ -19,10 +19,10 @@ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"
 logger.addHandler(handler)
 
 # --- LLM settings from config.yaml ---
-_DEFAULT_MODEL = _config.get("model", "openai-large")
+_DEFAULT_MODEL = "openai-large" #_config.get("model", "openai-large")
 _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
 _CHAR = _config.get("char", "Eve")
-_DEFAULT_MAX_TOKENS = _config.get("max_tokens", 8000)
+_DEFAULT_MAX_TOKENS = 8000 #_config.get("max_tokens", 8000)
 
 # --- OpenAI client init ---
 client = OpenAI(
```
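For context, a minimal, self-contained sketch of the settings block as it stands after this commit. The `_load_config` helper and the `base_url`/`api_key` arguments are assumptions for illustration; polLLM.py's actual config loading and client initialization are not visible in this diff.

```python
# Sketch of polLLM.py's settings block after this commit (not the full file).
# Assumptions: _config is a dict loaded from config.yaml elsewhere in the
# module, and `client` is the standard `openai` v1 client.
import os

import yaml
from openai import OpenAI


def _load_config(path: str = "config.yaml") -> dict:
    """Hypothetical loader; the real one in polLLM.py is not part of the diff."""
    try:
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f) or {}
    except FileNotFoundError:
        return {}


_config = _load_config()

# After the commit: hardcoded defaults, no config.yaml lookup for these two.
_DEFAULT_MODEL = "openai-large"
_DEFAULT_MAX_TOKENS = 8000

# Still sourced from config.yaml, with fallbacks.
_SYSTEM_TEMPLATE = _config.get("system_prompt", "")
_CHAR = _config.get("char", "Eve")

# --- OpenAI client init --- (base_url/api_key here are placeholders)
client = OpenAI(
    base_url=os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
    api_key=os.getenv("OPENAI_API_KEY", ""),
)
```

If configurability is wanted again later, one middle ground would be an environment-variable override such as `os.getenv("LLM_MODEL", "openai-large")`, which keeps the hardcoded value as the default when nothing is set.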