Chandima Prabhath committed
Commit 7b1490a · 1 Parent(s): e15e135

Refactor LLM integration and function call handling; streamline message processing and enhance error handling

Files changed (2):
  1. app.py +439 -74
  2. polLLM.py +19 -58
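Note on the refactor: the previous OpenAI-style function-calling path (call_llm_function plus the FUNCTIONS specs) is replaced by a prompt-driven scheme. generate_llm() now returns plain text; route_intent() in app.py asks the model for a bare JSON object, validates it against Pydantic intent models, and dispatches through ACTION_HANDLERS, falling back to send_text for non-JSON replies. The snippet below is a minimal, self-contained sketch of that parse-and-validate step (illustrative only, not code from this commit; it assumes Pydantic v2, which the diff's use of model_validate/model_dump implies):

    import json
    from typing import Literal
    from pydantic import BaseModel, ValidationError

    class JokeIntent(BaseModel):
        action: Literal["joke"]

    class SendTextIntent(BaseModel):
        action: Literal["send_text"]
        message: str

    INTENT_MODELS = [JokeIntent, SendTextIntent]

    def parse_intent(raw: str) -> BaseModel:
        # Strict pass: the LLM reply must be a JSON object matching one intent model.
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            # Plain-text replies are wrapped as a send_text intent.
            return SendTextIntent(action="send_text", message=raw)
        for model in INTENT_MODELS:
            try:
                return model.model_validate(data)  # Pydantic v2 API, as used in app.py
            except ValidationError:
                continue
        return SendTextIntent(action="send_text", message=raw)

    print(parse_intent('{"action": "joke"}'))  # matches JokeIntent
    print(parse_intent("Hello there!"))        # falls back to SendTextIntent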
app.py CHANGED
@@ -4,27 +4,40 @@ import requests
  import logging
  import queue
  import json
- from typing import List, Optional
  from fastapi import FastAPI, Request, HTTPException
  from fastapi.responses import PlainTextResponse
- from pydantic import BaseModel, Field

  from FLUX import generate_image
  from VoiceReply import generate_voice_reply
- from polLLM import call_llm_function, LLMBadRequestError

- # --- Logging Setup ---
  LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
  logger = logging.getLogger("eve_bot")
  logger.setLevel(LOG_LEVEL)
  handler = logging.StreamHandler()
  formatter = logging.Formatter(
      "%(asctime)s [%(levelname)s] [%(message_id)s/%(sender)s] %(message)s"
  )
  handler.setFormatter(formatter)
  logger.handlers = [handler]

- # Thread‐local to carry context
  _thread_ctx = threading.local()
  def set_thread_context(chat_id, sender, message_id):
      _thread_ctx.chat_id = chat_id
@@ -38,20 +51,31 @@ def get_thread_context():
          getattr(_thread_ctx, "message_id", None),
      )

- # Conversation history
- from collections import defaultdict, deque
  history = defaultdict(lambda: deque(maxlen=20))
- def record(role, chat_id, sender, content):
-     history[(chat_id, sender)].append({"role": role, "content": content})

- # BotClient & helpers (send_text, send_media, _fn_generate_images, etc.)
  class BotConfig:
      GREEN_API_URL = os.getenv("GREEN_API_URL")
      GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
      GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
      GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
      WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
-     BOT_GROUP_CHAT = os.getenv("BOT_GROUP_CHAT")
      BOT_JID = os.getenv("BOT_JID")
      IMAGE_DIR = "/tmp/images"
      AUDIO_DIR = "/tmp/audio"
@@ -61,7 +85,7 @@ class BotConfig:
      def validate(cls):
          missing = [n for n in (
              "GREEN_API_URL","GREEN_API_TOKEN",
-             "GREEN_API_ID_INSTANCE","WEBHOOK_AUTH_TOKEN","BOT_JID","BOT_GROUP_CHAT"
          ) if not getattr(cls, n)]
          if missing:
              raise ValueError(f"Missing env vars: {', '.join(missing)}")
@@ -98,6 +122,12 @@ class BotClient:
              "quotedMessageId": message_id
          })

      def send_media(self, message_id, chat_id, file_path, caption, media_type):
          endpoint = "sendFileByUpload"
          payload = {
@@ -113,8 +143,12 @@
  BotConfig.validate()
  client = BotClient(BotConfig)

- # Thread pool for images/audio
  task_queue = queue.Queue()
  def worker():
      while True:
          task = task_queue.get()
@@ -127,15 +161,17 @@ def worker():
              logger.error(f"Worker error {task}: {e}")
          finally:
              task_queue.task_done()
  for _ in range(4):
      threading.Thread(target=worker, daemon=True).start()

- # Basic send_text
  def _fn_send_text(mid, cid, message):
      client.send_message(mid, cid, message)
      chat_id, sender, _ = get_thread_context()
      if chat_id and sender:
-         record("assistant", chat_id, sender, message)
      task_queue.put({
          "type": "audio",
          "message_id": mid,
@@ -143,26 +179,120 @@ def _fn_send_text(mid, cid, message):
          "prompt": message
      })

- # Image generation helper
- def _fn_generate_images(message_id, chat_id, prompt, count=1, width=None, height=None):
-     _fn_send_text(message_id, chat_id, f"✨ Generating {count} image(s)…")
-     for i in range(count):
          try:
              img, path, ret_p, url = generate_image(
                  prompt, message_id, message_id, BotConfig.IMAGE_DIR,
                  width=width, height=height
              )
              formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
-             cap = f"✨ Image {i+1}/{count}: {url}\n>{chr(8203)} {formatted}"
              client.send_media(message_id, chat_id, path, cap, media_type="image")
              os.remove(path)
          except Exception as e:
-             logger.warning(f"Img {i+1}/{count} failed: {e}")
-             _fn_send_text(message_id, chat_id, f"😢 Failed to generate image {i+1}/{count}.")

- # Voice reply helper
- def _fn_voice_reply(message_id, chat_id, prompt, **_):
-     proc = f"Just say this exactly as written in a friendly, playful way: {prompt}"
      res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
      if res and res[0]:
          path, _ = res
@@ -171,83 +301,318 @@ def _fn_voice_reply(message_id, chat_id, prompt, **_):
      else:
          _fn_send_text(message_id, chat_id, prompt)

- # Function specs for LLM
- FUNCTIONS = [
-     {
-         "name": "generate_image",
-         "description": "Generate one or more images",
-         "parameters": {
-             "type": "object",
-             "properties": {
-                 "prompt": {"type": "string"},
-                 "count": {"type": "integer", "minimum": 1},
-                 "width": {"type": "integer"},
-                 "height": {"type": "integer"}
-             },
-             "required": ["prompt"]
-         }
-     },
-     {
-         "name": "send_text",
-         "description": "Send a text message",
-         "parameters": {
-             "type": "object",
-             "properties": {"message": {"type": "string"}},
-             "required": ["message"]
-         }
-     }
-     # Add other function specs (summarize, translate, etc.) here...
  ]

- def handle_function_call(call, mid, cid):
-     name = call["name"]
-     args = call.get("arguments", {})
-     if name == "generate_image":
-         _fn_generate_images(mid, cid, **args)
-     elif name == "send_text":
-         _fn_send_text(mid, cid, args["message"])
-     # handle other functions similarly...

- # FastAPI setup
  app = FastAPI()

  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
      data = await request.json()
-     if request.headers.get("Authorization") != f"Bearer {os.getenv('WEBHOOK_AUTH_TOKEN')}":
          raise HTTPException(403, "Unauthorized")

      chat_id = data["senderData"]["chatId"]
      sender = data["senderData"]["sender"]
      mid = data["idMessage"]
      set_thread_context(chat_id, sender, mid)

-     md = data["messageData"]
      tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
-     if not tmd or chat_id != os.getenv("BOT_GROUP_CHAT") or data["typeWebhook"] != "incomingMessageReceived":
          return {"success": True}

      body = (tmd.get("textMessage") or tmd.get("text","")).strip()
-     record("user", chat_id, sender, body)

-     try:
-         call = call_llm_function(body, list(history[(chat_id, sender)]), FUNCTIONS)
-     except LLMBadRequestError:
-         history[(chat_id, sender)].clear()
-         _fn_send_text(mid, chat_id, "Oops, let’s start fresh!")
          return {"success": True}

-     if "name" in call:
-         handle_function_call(call, mid, chat_id)
      else:
-         _fn_send_text(mid, chat_id, call["content"])

      return {"success": True}

  @app.get("/", response_class=PlainTextResponse)
  def index():
-     return "Eve is running!"

  if __name__ == "__main__":
      import uvicorn
-     uvicorn.run("app:app", host="0.0.0.0", port=7860)
  import logging
  import queue
  import json
+ from typing import List, Optional, Union, Literal
+ from collections import defaultdict, deque
+ from concurrent.futures import ThreadPoolExecutor
+
  from fastapi import FastAPI, Request, HTTPException
  from fastapi.responses import PlainTextResponse
+ from pydantic import BaseModel, Field, ValidationError

  from FLUX import generate_image
  from VoiceReply import generate_voice_reply
+ from polLLM import generate_llm, LLMBadRequestError # assume this exception is raised on 400
+
+ # --- Logging Setup ---------------------------------------------------------

  LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
  logger = logging.getLogger("eve_bot")
  logger.setLevel(LOG_LEVEL)
+
  handler = logging.StreamHandler()
  formatter = logging.Formatter(
      "%(asctime)s [%(levelname)s] [%(message_id)s/%(sender)s] %(message)s"
  )
  handler.setFormatter(formatter)
+
+ class ContextFilter(logging.Filter):
+     def filter(self, record):
+         record.message_id = getattr(record, "message_id", "-")
+         record.sender = getattr(record, "sender", "-")
+         return True
+
+ handler.addFilter(ContextFilter())
  logger.handlers = [handler]

+ # Thread‐local to carry context through helpers
  _thread_ctx = threading.local()
  def set_thread_context(chat_id, sender, message_id):
      _thread_ctx.chat_id = chat_id
          getattr(_thread_ctx, "message_id", None),
      )

+ # --- Conversation History -------------------------------------------------
+
  history = defaultdict(lambda: deque(maxlen=20))

+ def record_user_message(chat_id, sender, message):
+     history[(chat_id, sender)].append(f"User: {message}")
+
+ def record_bot_message(chat_id, sender, message):
+     history[(chat_id, sender)].append(f"Assistant: {message}")
+
+ def get_history_text(chat_id, sender):
+     return "\n".join(history[(chat_id, sender)])
+
+ def clear_history(chat_id, sender):
+     history[(chat_id, sender)].clear()
+
+ # --- Bot Config & Client --------------------------------------------------
+
  class BotConfig:
      GREEN_API_URL = os.getenv("GREEN_API_URL")
      GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
      GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
      GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
      WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
+     BOT_GROUP_CHAT = "120363312903494448@g.us"
      BOT_JID = os.getenv("BOT_JID")
      IMAGE_DIR = "/tmp/images"
      AUDIO_DIR = "/tmp/audio"
      def validate(cls):
          missing = [n for n in (
              "GREEN_API_URL","GREEN_API_TOKEN",
+             "GREEN_API_ID_INSTANCE","WEBHOOK_AUTH_TOKEN","BOT_JID"
          ) if not getattr(cls, n)]
          if missing:
              raise ValueError(f"Missing env vars: {', '.join(missing)}")
              "quotedMessageId": message_id
          })

+     def send_message_to(self, chat_id, text):
+         return self.send("sendMessage", {
+             "chatId": chat_id,
+             "message": text
+         })
+
      def send_media(self, message_id, chat_id, file_path, caption, media_type):
          endpoint = "sendFileByUpload"
          payload = {
  BotConfig.validate()
  client = BotClient(BotConfig)

+ # --- Threading & Queues ---------------------------------------------------
+
  task_queue = queue.Queue()
+ polls = {}
+ executor = ThreadPoolExecutor(max_workers=4)
+
  def worker():
      while True:
          task = task_queue.get()
              logger.error(f"Worker error {task}: {e}")
          finally:
              task_queue.task_done()
+
  for _ in range(4):
      threading.Thread(target=worker, daemon=True).start()

+ # --- Basic Tool Functions -------------------------------------------------
+
  def _fn_send_text(mid, cid, message):
      client.send_message(mid, cid, message)
      chat_id, sender, _ = get_thread_context()
      if chat_id and sender:
+         record_bot_message(chat_id, sender, message)
      task_queue.put({
          "type": "audio",
          "message_id": mid,
          "prompt": message
      })

+ def _fn_send_accept(mid, cid, message):
+     client.send_message(mid, cid, message)
+     chat_id, sender, _ = get_thread_context()
+     if chat_id and sender:
+         record_bot_message(chat_id, sender, message)
+
+ def _fn_summarize(mid, cid, text):
+     summary = generate_llm(f"Summarize:\n\n{text}")
+     _fn_send_text(mid, cid, summary)
+
+ def _fn_translate(mid, cid, lang, text):
+     resp = generate_llm(f"Translate to {lang}:\n\n{text}")
+     _fn_send_text(mid, cid, resp)
+
+ def _fn_joke(mid, cid):
+     try:
+         j = requests.get(
+             "https://official-joke-api.appspot.com/random_joke",
+             timeout=5
+         ).json()
+         joke = f"{j['setup']}\n\n{j['punchline']}"
+     except:
+         joke = generate_llm("Tell me a short joke.")
+     _fn_send_text(mid, cid, joke)
+
+ def _fn_weather(mid, cid, loc):
+     raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
+     report = generate_llm(f"Give a weather report in °C:\n\n{raw}")
+     _fn_send_text(mid, cid, report)
+
+ def _fn_inspire(mid, cid):
+     quote = generate_llm("Give me a unique, random short inspirational quote.")
+     _fn_send_text(mid, cid, f"✨ {quote}")
+
+ def _fn_meme(mid, cid, txt):
+     _fn_send_accept(mid, cid, "🎨 Generating meme…")
+     task_queue.put({
+         "type": "image",
+         "message_id": mid,
+         "chat_id": cid,
+         "prompt": f"meme: {txt}"
+     })
+
+ def _fn_poll_create(mid, cid, question, options):
+     votes = {i+1:0 for i in range(len(options))}
+     polls[cid] = {"question": question, "options": options, "votes": votes, "voters": {}}
+     text = f"📊 *Poll:* {question}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(options))
+     _fn_send_text(mid, cid, text)
+
+ def _fn_poll_vote(mid, cid, voter, choice):
+     poll = polls.get(cid)
+     if not poll or choice<1 or choice>len(poll["options"]):
+         return
+     prev = poll["voters"].get(voter)
+     if prev:
+         poll["votes"][prev] -= 1
+     poll["votes"][choice] += 1
+     poll["voters"][voter] = choice
+     _fn_send_text(mid, cid, f"✅ Voted for {poll['options'][choice-1]}")
+
+ def _fn_poll_results(mid, cid):
+     poll = polls.get(cid)
+     if not poll:
+         _fn_send_text(mid, cid, "No active poll.")
+         return
+     txt = f"📊 *Results:* {poll['question']}\n" + "\n".join(
+         f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
+     )
+     _fn_send_text(mid, cid, txt)
+
+ def _fn_poll_end(mid, cid):
+     poll = polls.pop(cid, None)
+     if not poll:
+         _fn_send_text(mid, cid, "No active poll.")
+         return
+     txt = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
+         f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
+     )
+     _fn_send_text(mid, cid, txt)
+
+ def _fn_generate_images(
+     message_id: str,
+     chat_id: str,
+     prompt: str,
+     count: int = 1,
+     width: Optional[int] = None,
+     height: Optional[int] = None,
+     **_
+ ):
+     _fn_send_accept(message_id, chat_id, f"✨ Generating {count} image(s)…")
+     for i in range(1, count+1):
          try:
              img, path, ret_p, url = generate_image(
                  prompt, message_id, message_id, BotConfig.IMAGE_DIR,
                  width=width, height=height
              )
              formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
+             cap = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
              client.send_media(message_id, chat_id, path, cap, media_type="image")
              os.remove(path)
          except Exception as e:
+             logger.warning(f"Img {i}/{count} failed: {e}")
+             _fn_send_text(message_id, chat_id, f"😢 Failed to generate image {i}/{count}.")

+ def _fn_voice_reply(
+     message_id: str,
+     chat_id: str,
+     prompt: str,
+     **_
+ ):
+     proc = (
+         f"Just say this exactly as written in a friendly, playful, "
+         f"happy and helpful but a little bit clumsy-cute way: {prompt}"
+     )
      res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
      if res and res[0]:
          path, _ = res
      else:
          _fn_send_text(message_id, chat_id, prompt)

+ # --- Pydantic Models for Function Calling --------------------------------
+
+ class BaseIntent(BaseModel):
+     action: str
+
+ class SummarizeIntent(BaseIntent):
+     action: Literal["summarize"]
+     text: str
+
+ class TranslateIntent(BaseIntent):
+     action: Literal["translate"]
+     lang: str
+     text: str
+
+ class JokeIntent(BaseIntent):
+     action: Literal["joke"]
+
+ class WeatherIntent(BaseIntent):
+     action: Literal["weather"]
+     location: str
+
+ class InspireIntent(BaseIntent):
+     action: Literal["inspire"]
+
+ class MemeIntent(BaseIntent):
+     action: Literal["meme"]
+     text: str
+
+ class PollCreateIntent(BaseIntent):
+     action: Literal["poll_create"]
+     question: str
+     options: List[str]
+
+ class PollVoteIntent(BaseIntent):
+     action: Literal["poll_vote"]
+     voter: str
+     choice: int
+
+ class PollResultsIntent(BaseIntent):
+     action: Literal["poll_results"]
+
+ class PollEndIntent(BaseIntent):
+     action: Literal["poll_end"]
+
+ class GenerateImageIntent(BaseModel):
+     action: Literal["generate_image"]
+     prompt: str
+     count: int = Field(default=1, ge=1)
+     width: Optional[int]
+     height: Optional[int]
+
+ class SendTextIntent(BaseIntent):
+     action: Literal["send_text"]
+     message: str
+
+ # list of all intent models
+ INTENT_MODELS = [
+     SummarizeIntent, TranslateIntent, JokeIntent, WeatherIntent,
+     InspireIntent, MemeIntent, PollCreateIntent, PollVoteIntent,
+     PollResultsIntent, PollEndIntent, GenerateImageIntent, SendTextIntent
  ]

+ ACTION_HANDLERS = {
+     "summarize": lambda mid,cid,**i: _fn_summarize(mid,cid,i["text"]),
+     "translate": lambda mid,cid,**i: _fn_translate(mid,cid,i["lang"],i["text"]),
+     "joke": lambda mid,cid,**i: _fn_joke(mid,cid),
+     "weather": lambda mid,cid,**i: _fn_weather(mid,cid,i["location"]),
+     "inspire": lambda mid,cid,**i: _fn_inspire(mid,cid),
+     "meme": lambda mid,cid,**i: _fn_meme(mid,cid,i["text"]),
+     "poll_create": lambda mid,cid,**i: _fn_poll_create(mid,cid,i["question"],i["options"]),
+     "poll_vote": lambda mid,cid,**i: _fn_poll_vote(mid,cid,i["voter"],i["choice"]),
+     "poll_results": lambda mid,cid,**i: _fn_poll_results(mid,cid),
+     "poll_end": lambda mid,cid,**i: _fn_poll_end(mid,cid),
+     "generate_image": _fn_generate_images,
+     "send_text": lambda mid,cid,**i: _fn_send_text(mid,cid,i["message"]),
+ }
+
+ # --- Intent Routing with Fallback & History‐Reset on 400 -------------------
+
+ def route_intent(user_input: str, chat_id: str, sender: str):
+     history_text = get_history_text(chat_id, sender)
+     sys_prompt = (
+         "You are Eve, a sweet, innocent, and helpful assistant. "
+         "You never perform work yourself—you only invoke one of the available functions. "
+         "When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
+         "Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
+         "If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
+         "Functions you can call:\n"
+         " summarize(text)\n"
+         " translate(lang, text)\n"
+         " joke()\n"
+         " weather(location)\n"
+         " inspire()\n"
+         " meme(text)\n"
+         " poll_create(question, options)\n"
+         " poll_vote(voter, choice)\n"
+         " poll_results()\n"
+         " poll_end()\n"
+         " generate_image(prompt, count, width, height)\n"
+         " send_text(message)\n\n"
+         "Conversation so far:\n"
+         f"{history_text}\n\n"
+         "Current user message:\n"
+         f"User: {user_input}"
+     )
+
+     #prompt = f"{sys_prompt}\nConversation so far:\n{history_text}\n\n current message: User: {user_input}"
+
+     try:
+         raw = generate_llm(sys_prompt)
+     except LLMBadRequestError:
+         # Clear history on HTTP 400 from the LLM
+         clear_history(chat_id, sender)
+         return SendTextIntent(action="send_text", message="Oops, I lost my train of thought—let’s start fresh!")
+
+     logger.debug(f"LLM raw response: {raw}")
+
+     # 1) Strict: try each Pydantic model
+     try:
+         parsed = json.loads(raw)
+         logger.debug(f"Parsed JSON: {parsed}")
+     except json.JSONDecodeError:
+         return SendTextIntent(action="send_text", message=raw)
+
+     for M in INTENT_MODELS:
+         try:
+             intent = M.model_validate(parsed)
+             logger.debug(f"Matched intent model: {M.__name__} with data {parsed}")
+             return intent
+         except ValidationError:
+             continue
+
+     logger.warning("Strict parse failed for all models, falling back to lenient")
+
+     # 2) Lenient JSON get
+     action = parsed.get("action")
+     if action in ACTION_HANDLERS:
+         data = parsed
+         kwargs = {}
+         if action == "generate_image":
+             kwargs["prompt"] = data.get("prompt","")
+             kwargs["count"] = int(data.get("count", BotConfig.DEFAULT_IMAGE_COUNT))
+             kwargs["width"] = data.get("width")
+             kwargs["height"] = data.get("height")
+         elif action == "send_text":
+             kwargs["message"] = data.get("message","")
+         elif action == "translate":
+             kwargs["lang"] = data.get("lang","")
+             kwargs["text"] = data.get("text","")
+         elif action == "summarize":
+             kwargs["text"] = data.get("text","")
+         elif action == "weather":
+             kwargs["location"] = data.get("location","")
+         elif action == "meme":
+             kwargs["text"] = data.get("text","")
+         elif action == "poll_create":
+             kwargs["question"] = data.get("question","")
+             kwargs["options"] = data.get("options",[])
+         elif action == "poll_vote":
+             kwargs["voter"] = sender
+             kwargs["choice"] = int(data.get("choice",0))
+         try:
+             # coerce into Pydantic for uniform interface
+             model = next(
+                 m for m in INTENT_MODELS
+                 if getattr(m, "__fields__", {}).get("action").default == action
+             )
+             intent = model.model_validate({"action":action, **kwargs})
+             logger.debug(f"Leniently matched intent model: {model.__name__} with kwargs {kwargs}")
+             return intent
+         except Exception as e:
+             logger.error(f"Lenient parsing into Pydantic failed: {e}")
+             return SendTextIntent(action="send_text", message=raw)
+
+     return SendTextIntent(action="send_text", message=raw)
+
+ # --- FastAPI & Webhook ----------------------------------------------------

  app = FastAPI()
+ help_text = (
+     "🤖 *Eve* commands:\n"
+     "• /help\n"
+     "• /summarize <text>\n"
+     "• /translate <lang>|<text>\n"
+     "• /joke\n"
+     "• /weather <loc>\n"
+     "• /inspire\n"
+     "• /meme <text>\n"
+     "• /poll <Q>|… / /results / /endpoll\n"
+     "• /gen <prompt>|<count>|<width>|<height>\n"
+     "Otherwise chat or reply to my message to invoke tools."
+ )

  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
      data = await request.json()
+     if request.headers.get("Authorization") != f"Bearer {BotConfig.WEBHOOK_AUTH_TOKEN}":
          raise HTTPException(403, "Unauthorized")

      chat_id = data["senderData"]["chatId"]
      sender = data["senderData"]["sender"]
      mid = data["idMessage"]
      set_thread_context(chat_id, sender, mid)
+     logger.debug(f"Received webhook for message {mid} from {sender}")
+
+     if chat_id != BotConfig.BOT_GROUP_CHAT or data["typeWebhook"] != "incomingMessageReceived":
+         return {"success": True}

+     md = data["messageData"]
      tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
+     if not tmd:
          return {"success": True}

      body = (tmd.get("textMessage") or tmd.get("text","")).strip()
+     record_user_message(chat_id, sender, body)
+     logger.debug(f"User message: {body}")

+     low = body.lower()
+     # Slash commands...
+     if low == "/help":
+         _fn_send_text(mid, chat_id, help_text)
+         return {"success": True}
+     if low.startswith("/summarize "):
+         _fn_summarize(mid, chat_id, body[11:].strip())
+         return {"success": True}
+     if low.startswith("/translate "):
+         lang, txt = body[11:].split("|", 1)
+         _fn_translate(mid, chat_id, lang.strip(), txt.strip())
+         return {"success": True}
+     if low == "/joke":
+         _fn_joke(mid, chat_id)
+         return {"success": True}
+     if low.startswith("/weather "):
+         _fn_weather(mid, chat_id, body[9:].strip().replace(" ","+"))
+         return {"success": True}
+     if low == "/inspire":
+         _fn_inspire(mid, chat_id)
+         return {"success": True}
+     if low.startswith("/meme "):
+         _fn_meme(mid, chat_id, body[6:].strip())
+         return {"success": True}
+     if low.startswith("/poll "):
+         parts = [p.strip() for p in body[6:].split("|")]
+         _fn_poll_create(mid, chat_id, parts[0], parts[1:])
+         return {"success": True}
+     if chat_id in polls and low.isdigit():
+         _fn_poll_vote(mid, chat_id, sender, int(low))
+         return {"success": True}
+     if low == "/results":
+         _fn_poll_results(mid, chat_id)
+         return {"success": True}
+     if low == "/endpoll":
+         _fn_poll_end(mid, chat_id)
+         return {"success": True}
+     if low.startswith("/gen"):
+         parts = body[4:].split("|")
+         pr = parts[0].strip()
+         ct = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
+         width = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else None
+         height = int(parts[3]) if len(parts) > 3 and parts[3].isdigit() else None
+         _fn_send_accept(mid, chat_id, f"✨ Generating {ct} image(s)…")
+         task_queue.put({
+             "type": "image",
+             "message_id": mid,
+             "chat_id": chat_id,
+             "prompt": pr,
+             "count": ct,
+             "width": width,
+             "height": height
+         })
          return {"success": True}

+     # Skip mentions
+     if tmd.get("contextInfo", {}).get("mentionedJidList"):
+         return {"success": True}
+
+     # Handle quoted replies to the bot
+     if md.get("typeMessage") == "quotedMessage":
+         ext = md["extendedTextMessageData"]
+         quoted = md["quotedMessage"]
+         if ext.get("participant") == BotConfig.BOT_JID:
+             effective = (
+                 f"Quoted: {quoted.get('textMessage','')}\n"
+                 f"User: {ext.get('text','')}"
+             )
+         else:
+             effective = body
      else:
+         effective = body
+
+     # Route intent & dispatch
+     intent = route_intent(effective, chat_id, sender)
+     logger.debug(f"Final intent: {intent}")
+     handler = ACTION_HANDLERS.get(intent.action)
+     if handler:
+         kwargs = intent.model_dump(exclude={"action"})
+         logger.debug(f"Dispatching action '{intent.action}' with args {kwargs}")
+         handler(mid, chat_id, **kwargs)
+     else:
+         logger.warning(f"No handler for action '{intent.action}'")
+         _fn_send_text(mid, chat_id, "Sorry, I didn't understand that.")

      return {"success": True}

  @app.get("/", response_class=PlainTextResponse)
  def index():
+     return "Server is running!"

  if __name__ == "__main__":
+     client.send_message_to(
+         BotConfig.BOT_GROUP_CHAT,
+         "🌟 Eve is online! Type /help to see commands."
+     )
      import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
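For readers tracing the new webhook flow above: the handler only acts on Green API "incomingMessageReceived" events addressed to BotConfig.BOT_GROUP_CHAT and authorized with the WEBHOOK_AUTH_TOKEN bearer token. A rough local smoke test against a running instance might look like the sketch below (illustrative, not part of the commit; the message id, sender, and token are placeholders, and only fields the handler actually reads are included):

    import requests

    payload = {
        "typeWebhook": "incomingMessageReceived",
        "idMessage": "TEST-MSG-1",                       # placeholder id
        "senderData": {
            "chatId": "120363312903494448@g.us",         # must equal BotConfig.BOT_GROUP_CHAT
            "sender": "00000000000@c.us",                # placeholder sender
        },
        "messageData": {
            "typeMessage": "textMessage",
            "textMessageData": {"textMessage": "/help"},
        },
    }

    resp = requests.post(
        "http://localhost:7860/whatsapp",
        json=payload,
        headers={"Authorization": "Bearer <WEBHOOK_AUTH_TOKEN>"},  # placeholder token
        timeout=10,
    )
    print(resp.status_code, resp.json())  # expect {"success": True} once the Green API env vars are set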
polLLM.py CHANGED
@@ -19,10 +19,10 @@ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"
  logger.addHandler(handler)

  # --- LLM settings from config.yaml ---
- _DEFAULT_MODEL = "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8"
- _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
- _CHAR = _config.get("char", "Eve")
- _CHUTES_API_KEY = os.getenv("CHUTES_API_KEY")

  # --- Custom exception ---
  class LLMBadRequestError(Exception):
@@ -36,19 +36,25 @@ client = OpenAI(
  )

  def _build_system_prompt() -> str:
-     """Substitute {char} into the system prompt template."""
      return _SYSTEM_TEMPLATE.replace("{char}", _CHAR)

  def generate_llm(
-     messages: list,
-     functions: list = None,
-     function_call: dict | str = "auto"
- ) -> dict:
      """
      Send a chat-completion request to the LLM, with retries and backoff.
-     Returns the raw response dict, so we can inspect function calls.
      """
      model = _DEFAULT_MODEL
      backoff = 1
      for attempt in range(1, 6):
          try:
@@ -57,12 +63,11 @@ def generate_llm(
              resp = client.chat.completions.create(
                  model=model,
                  messages=messages,
-                 functions=functions or [],
-                 function_call=function_call,
                  seed=seed,
              )
              logger.debug("LLM response received")
-             return resp
          except Exception as e:
              if getattr(e, "status_code", None) == 400:
                  logger.error("LLM error 400 (Bad Request): Not retrying.")
@@ -75,54 +80,10 @@ def generate_llm(
      logger.critical("LLM failed after 5 attempts, raising")
      raise

- def call_llm_function(user_input: str, history: list, functions: list):
-     """
-     Orchestrate a function call. Returns either:
-       - {"name": fn_name, "arguments": {...}} if a function call is requested
-       - {"content": "..." } for a normal chat reply
-     """
-     system_prompt = _build_system_prompt()
-     messages = [{"role": "system", "content": system_prompt}] + history + [
-         {"role": "user", "content": user_input}
-     ]
-
-     response = generate_llm(
-         messages=messages,
-         functions=functions,
-         function_call="auto"
-     )
-
-     choice = response.choices[0]
-     msg = choice.message
-
-     if msg.function_call:
-         return {
-             "name": msg.function_call.name,
-             "arguments": msg.function_call.arguments
-         }
-     else:
-         return {"content": msg.content.strip()}
-
  # Example local test
  if __name__ == "__main__":
      logger.info("Testing generate_llm() with a sample prompt")
      try:
-         res = call_llm_function("Generate 2 images of a sunset", [], [
-             {
-                 "name": "generate_image",
-                 "description": "Generate images",
-                 "parameters": {
-                     "type": "object",
-                     "properties": {
-                         "prompt": {"type": "string"},
-                         "count": {"type": "integer"},
-                         "width": {"type": "integer"},
-                         "height": {"type": "integer"}
-                     },
-                     "required": ["prompt"]
-                 }
-             }
-         ])
-         print(res)
      except LLMBadRequestError as e:
          logger.warning(f"Test failed with bad request: {e}")
  logger.addHandler(handler)

  # --- LLM settings from config.yaml ---
+ _DEFAULT_MODEL = "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8" # _config.get("model", "openai-large")
+ _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
+ _CHAR = _config.get("char", "Eve")
+ _CHUTES_API_KEY = os.getenv("CHUTES_API_KEY")

  # --- Custom exception ---
  class LLMBadRequestError(Exception):
  )

  def _build_system_prompt() -> str:
+     """
+     Substitute {char} into the system prompt template.
+     """
      return _SYSTEM_TEMPLATE.replace("{char}", _CHAR)

  def generate_llm(
+     prompt: str,
+ ) -> str:
      """
      Send a chat-completion request to the LLM, with retries and backoff.
+     Reads defaults from config.yaml, but can be overridden per-call.
      """
      model = _DEFAULT_MODEL
+     system_prompt = _build_system_prompt()
+     messages = [
+         {"role": "system", "content": system_prompt},
+         {"role": "user", "content": prompt},
+     ]
+
      backoff = 1
      for attempt in range(1, 6):
          try:
              resp = client.chat.completions.create(
                  model=model,
                  messages=messages,
                  seed=seed,
              )
+             text = resp.choices[0].message.content.strip()
              logger.debug("LLM response received")
+             return text
          except Exception as e:
              if getattr(e, "status_code", None) == 400:
                  logger.error("LLM error 400 (Bad Request): Not retrying.")
      logger.critical("LLM failed after 5 attempts, raising")
      raise

  # Example local test
  if __name__ == "__main__":
      logger.info("Testing generate_llm() with a sample prompt")
      try:
+         print(generate_llm("generate 4 images of 1:1 profile picture"))
      except LLMBadRequestError as e:
          logger.warning(f"Test failed with bad request: {e}")