Chandima Prabhath committed on
Commit 123c627 · 1 Parent(s): 9d508fe

Refactor generate_llm function to remove max_tokens and temperature parameters; use hardcoded defaults instead.

Files changed (1)
  1. polLLM.py  +0 -8
polLLM.py CHANGED

@@ -22,7 +22,6 @@ logger.addHandler(handler)
 _DEFAULT_MODEL = "openai-large" #_config.get("model", "openai-large")
 _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
 _CHAR = _config.get("char", "Eve")
-_DEFAULT_MAX_TOKENS = 8000 #_config.get("max_tokens", 8000)
 
 # --- OpenAI client init ---
 client = OpenAI(
@@ -39,17 +38,12 @@ def _build_system_prompt() -> str:
 def generate_llm(
     prompt: str,
     model: str = None,
-    max_tokens: int = None,
-    temperature: float = None,
 ) -> str:
     """
     Send a chat-completion request to the LLM, with retries and backoff.
     Reads defaults from config.yaml, but can be overridden per-call.
     """
     model = model or _DEFAULT_MODEL
-    max_tokens = max_tokens or _DEFAULT_MAX_TOKENS
-    temperature = temperature if temperature is not None else _DEFAULT_TEMPERATURE
-
     system_prompt = _build_system_prompt()
     messages = [
         {"role": "system", "content": system_prompt},
@@ -64,8 +58,6 @@ def generate_llm(
     resp = client.chat.completions.create(
         model = model,
         messages = messages,
-        max_tokens = max_tokens,
-        temperature = temperature,
         seed = seed,
     )
     text = resp.choices[0].message.content.strip()
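
After this commit, generate_llm no longer accepts max_tokens or temperature, and neither value is forwarded to client.chat.completions.create. A minimal call-site sketch, assuming polLLM.py is importable and its configured OpenAI-compatible endpoint is reachable (prompt strings here are illustrative):

    from polLLM import generate_llm

    # Per-call overrides are now limited to the model; max_tokens and
    # temperature can no longer be passed through to the API call.
    reply = generate_llm("Summarize the latest changes in one sentence.")
    other = generate_llm("Answer tersely: what is 2 + 2?", model="openai-large")
    print(reply)
    print(other)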
 