UntilDot committed (verified)
Commit b4c6e6a · 1 Parent(s): 484fac9

Update llm/agents.py

Files changed (1)
  1. llm/agents.py +69 -62
llm/agents.py CHANGED
@@ -1,90 +1,97 @@
- import asyncio
- import httpx
  import os
  import json

- # Load model config at startup
- with open("llm/model_config.json", "r") as f:
      CONFIG = json.load(f)

  PROVIDERS = CONFIG["providers"]
- MODEL_PROVIDER_MAPPING = CONFIG["models"]
-
- async def call_model_api(model: str, prompt: str) -> str:
-     provider_key = MODEL_PROVIDER_MAPPING.get(model)
      if not provider_key:
-         raise ValueError(f"No provider configured for model: {model}")

      provider = PROVIDERS.get(provider_key)
      if not provider:
-         raise ValueError(f"Provider {provider_key} not found in config")

-     url = provider["url"]
      api_key_env = provider["key_env"]
      api_key = os.getenv(api_key_env)

      if not api_key:
-         raise ValueError(f"Missing API key for provider {provider_key}")

      headers = {
          "Authorization": f"Bearer {api_key}",
-         "Content-Type": "application/json",
      }

-     body = {
-         "model": model,
-         "messages": [{"role": "user", "content": prompt}],
-         "temperature": 0.7,
      }

-     async with httpx.AsyncClient(timeout=30) as client:
-         response = await client.post(url, headers=headers, json=body)
-         response.raise_for_status()
-         return response.json()["choices"][0]["message"]["content"]
-
- async def query_llm_agent(name: str, prompt: str, settings: dict) -> str:
-     selected_model = settings.get("models", {}).get(name)

-     if not selected_model:
-         return f"[{name}] No model selected."

-     if selected_model not in MODEL_PROVIDER_MAPPING:
-         return f"[{name}] Model '{selected_model}' is not supported."

-     try:
-         response = await call_model_api(selected_model, prompt)
-         return f"[{name}] {response}"
-     except Exception as e:
-         return f"[{name}] Error: {str(e)}"
-
- async def query_all_llms(prompt: str, settings: dict) -> list:
-     agents = ["LLM-A", "LLM-B", "LLM-C"]
-     tasks = [query_llm_agent(agent, prompt, settings) for agent in agents]
-     return await asyncio.gather(*tasks)
-
- async def query_aggregator(responses: list, settings: dict) -> str:
-     model = settings.get("aggregator")
-     if not model:
-         return "[Aggregator] No aggregator model selected."
-     if model not in MODEL_PROVIDER_MAPPING:
-         return f"[Aggregator] Model '{model}' is not supported."
-
-     system_prompt = (
-         "You are an aggregator AI. Your task is to read the following responses "
-         "from different AI agents and produce a single, high-quality response.\n\n"
-         + "\n\n".join(responses)
      )

-     try:
-         result = await call_model_api(model, system_prompt)
-         return f"[Aggregator] {result}"
-     except Exception as e:
-         return f"[Aggregator] Error: {str(e)}"
-
- def query_all_llms_sync(prompt: str, settings: dict) -> list:
-     return asyncio.run(query_moa_chain(prompt, settings))

- async def query_moa_chain(prompt: str, settings: dict) -> list:
-     responses = await query_all_llms(prompt, settings)
-     aggregator = await query_aggregator(responses, settings)
-     return responses + [aggregator]
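For context, both the old and new versions expect the config JSON to define a "providers" map (each provider carrying a "url" endpoint and a "key_env" environment-variable name) and a "models" map from model name to provider key. A minimal sketch of what the new llm/config.json could contain; the provider name, endpoint URL, and model name below are illustrative and not part of this commit:

    {
        "providers": {
            "openai": {
                "url": "https://api.openai.com/v1/chat/completions",
                "key_env": "OPENAI_API_KEY"
            }
        },
        "models": {
            "gpt-4o-mini": "openai"
        }
    }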
 
 
 
  import os
  import json
+ import httpx
+ import asyncio  # used by asyncio.gather in query_moa_chain

+ # Load providers and models
+ with open("llm/config.json", "r") as f:
      CONFIG = json.load(f)

  PROVIDERS = CONFIG["providers"]
+ MODELS = CONFIG["models"]
+
+ # === SYSTEM PROMPTS ===
+ STRUCTURED_ASSISTANT_PROMPT = """You are a helpful AI assistant.
+
+ - Respond to the user’s message in a structured and professional way.
+ - Match the length and complexity of your response to the user's input.
+ - If the user's input is simple (e.g., "Hi"), reply politely without overexplaining.
+ - If the user's input is complex, give a complete and organized answer.
+ - Do not repeat the user's prompt.
+ - Be direct, helpful, and clear.
+ """
+
+ AGGREGATOR_PROMPT = """You are an AI responsible for combining the outputs of multiple AI assistants.
+
+ - Read their answers carefully.
+ - Identify the best parts from each.
+ - Write a single, coherent, and helpful reply.
+ - Do not simply merge texts or repeat everything.
+ - Match the depth and tone to the user's original input.
+ - Keep it natural and conversational.
+ """
+
+ # === CORE FUNCTIONS ===
+ async def query_llm(model_name, user_input, role_prompt):
+     provider_key = MODELS.get(model_name)
      if not provider_key:
+         return f"Model '{model_name}' is not supported."

      provider = PROVIDERS.get(provider_key)
      if not provider:
+         return f"Provider '{provider_key}' is not configured."

+     endpoint = provider["url"]
      api_key_env = provider["key_env"]
      api_key = os.getenv(api_key_env)

      if not api_key:
+         return f"API key for provider '{provider_key}' not found."

      headers = {
          "Authorization": f"Bearer {api_key}",
+         "Content-Type": "application/json"
      }

+     payload = {
+         "model": model_name,
+         "messages": [
+             {"role": "system", "content": role_prompt},
+             {"role": "user", "content": user_input}
+         ]
      }

+     try:
+         async with httpx.AsyncClient(timeout=60.0) as client:
+             response = await client.post(endpoint, headers=headers, json=payload)
+             response.raise_for_status()
+             data = response.json()
+             return data["choices"][0]["message"]["content"]
+     except Exception as e:
+         return f"Error: {str(e)}"


+ async def query_moa_chain(user_input, settings):
+     """Queries LLM-A, LLM-B, and LLM-C in parallel, then the Aggregator."""
+     llm_a = settings["models"].get("LLM-A")
+     llm_b = settings["models"].get("LLM-B")
+     llm_c = settings["models"].get("LLM-C")
+     aggregator = settings.get("aggregator")

+     # Parallel queries to LLM-A, B, C
+     results = await asyncio.gather(
+         query_llm(llm_a, user_input, STRUCTURED_ASSISTANT_PROMPT),
+         query_llm(llm_b, user_input, STRUCTURED_ASSISTANT_PROMPT),
+         query_llm(llm_c, user_input, STRUCTURED_ASSISTANT_PROMPT)
      )

+     # Format outputs to feed aggregator
+     combined_content = (
+         f"[LLM-A] {results[0]}\n\n"
+         f"[LLM-B] {results[1]}\n\n"
+         f"[LLM-C] {results[2]}"
+     )

+     # Single query to Aggregator (LLM-D)
+     final_response = await query_llm(aggregator, combined_content, AGGREGATOR_PROMPT)
+     return final_response
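Since the rewrite drops the query_all_llms_sync wrapper, callers now drive the chain with asyncio directly. A minimal usage sketch, assuming a config like the one above; the model names and prompt are placeholders:

    import asyncio

    from llm.agents import query_moa_chain

    settings = {
        "models": {
            "LLM-A": "gpt-4o-mini",
            "LLM-B": "gpt-4o-mini",
            "LLM-C": "gpt-4o-mini",
        },
        "aggregator": "gpt-4o-mini",
    }

    # Fans out to the three assistants in parallel, then returns only
    # the aggregated reply (a single string, unlike the old list return).
    final = asyncio.run(query_moa_chain("Summarize mixture-of-agents in two sentences.", settings))
    print(final)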