Chandima Prabhath committed
Commit 3ad83d3 · Parent: 8deb81b

Enhance configuration to support function calling for image generation and text replies; improve help text for user commands.

Files changed (2)
  1. app.py +273 -231
  2. config.yaml +49 -1
app.py CHANGED
@@ -22,7 +22,7 @@ class BotConfig:
22
  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
23
  WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
24
  BOT_GROUP_CHAT = "[email protected]"
25
- BOT_JID = os.getenv("BOT_JID") # your bot's own WhatsApp JID
26
  IMAGE_DIR = "/tmp/images"
27
  AUDIO_DIR = "/tmp/audio"
28
  DEFAULT_IMAGE_COUNT = 4
@@ -39,16 +39,19 @@ class BotConfig:
39
  ) if not getattr(cls, name)
40
  ]
41
  if missing:
42
- raise ValueError(f"Environment variables not set: {', '.join(missing)}")
43
 
44
  class BotClient:
45
  def __init__(self, cfg: BotConfig):
46
  self.cfg = cfg
47
  self.session = requests.Session()
48
- logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")
 
49
 
50
  def send(self, endpoint: str, payload: dict, files=None, retries=3):
51
- url = f"{self.cfg.GREEN_API_URL}/waInstance{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/{self.cfg.GREEN_API_TOKEN}"
 
 
52
  for attempt in range(1, retries + 1):
53
  try:
54
  resp = self.session.post(
@@ -66,16 +69,26 @@ class BotClient:
66
  return {"error": str(e)}
67
 
68
  def send_message(self, message_id: str, chat_id: str, text: str):
69
- payload = {"chatId": chat_id, "message": text, "quotedMessageId": message_id}
70
- return self.send("sendMessage", payload)
71
 
72
  def send_message_to(self, chat_id: str, text: str):
73
- payload = {"chatId": chat_id, "message": text}
74
- return self.send("sendMessage", payload)
75
 
76
- def send_media(self, message_id: str, chat_id: str, file_path: str, caption: str, media_type: str):
 
77
  endpoint = "sendFileByUpload"
78
- payload = {"chatId": chat_id, "caption": caption, "quotedMessageId": message_id}
 
 
 
 
79
  with open(file_path, "rb") as f:
80
  mime = "image/jpeg" if media_type == "image" else "audio/mpeg"
81
  files = [("file", (os.path.basename(file_path), f, mime))]
@@ -85,7 +98,7 @@ class BotClient:
85
  BotConfig.validate()
86
  client = BotClient(BotConfig)
87
 
88
- # --- Queues, stores, threading ---
89
 
90
  task_queue = queue.Queue()
91
  trivia_store = {}
@@ -111,90 +124,222 @@ def worker():
111
  task = task_queue.get()
112
  try:
113
  if task["type"] == "image":
114
- handle_image_generation(task)
115
  elif task["type"] == "audio":
116
- response_audio(task["message_id"], task["chat_id"], task["prompt"])
117
  except Exception as e:
118
- logging.error(f"Error in worker for task {task}: {e}")
119
  finally:
120
  task_queue.task_done()
121
 
122
  for _ in range(4):
123
  threading.Thread(target=worker, daemon=True).start()
124
 
125
- # --- Core responders ---
126
 
127
- def response_text(message_id, chat_id, prompt):
128
- try:
129
- msg = generate_llm(prompt)
130
- client.send_message(message_id, chat_id, msg)
131
- except Exception as e:
132
- logging.error(f"LLM error: {e}")
133
- client.send_message(message_id, chat_id, "Error processing your request.")
134
 
135
- def response_audio(message_id, chat_id, prompt):
136
  try:
137
- result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
138
- if result and result[0]:
139
- audio_path, _ = result
140
- client.send_media(message_id, chat_id, audio_path, "", media_type="audio")
141
- os.remove(audio_path)
142
- else:
143
- response_text(message_id, chat_id, prompt)
144
- except Exception as e:
145
- logging.error(f"Audio error: {e}")
146
- client.send_message(message_id, chat_id, "Error generating audio. Try again later.")
147
-
148
- def handle_image_generation(task):
149
- message_id = task["message_id"]
150
- chat_id = task["chat_id"]
151
- prompt = task["prompt"]
152
- count = task.get("num_images", BotConfig.DEFAULT_IMAGE_COUNT)
153
-
154
- for i in range(1, count + 1):
155
  try:
156
  img, path, ret_prompt, url = generate_image(
157
  prompt, message_id, message_id, BotConfig.IMAGE_DIR
158
  )
159
- if not img:
160
- raise RuntimeError("generate_image returned no image")
161
- formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_prompt.split("\n\n") if p.strip())
162
  caption = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
163
  client.send_media(message_id, chat_id, path, caption, media_type="image")
164
  os.remove(path)
165
  except Exception as e:
166
  logging.warning(f"Image {i}/{count} failed: {e}")
167
- client.send_message(message_id, chat_id, f"😢 Failed to generate image {i}/{count}.")
168
-
169
- # --- Startup ---
170
-
171
- def send_startup_message():
172
- client.send_message_to(
173
- BotConfig.BOT_GROUP_CHAT,
174
- "🌟 Hi! I'm Eve, your friendly AI assistant. I'm now live and ready to help!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  )
176
-
177
- send_startup_message()
178
 
179
  # --- FastAPI App & Webhook ---
180
 
181
  app = FastAPI()
182
-
183
  help_text = (
184
- "πŸ€– *Hi there, I'm Eve!* Here are the commands you can use:\n\n"
185
- "β€’ */help* – list commands\n"
186
- "β€’ */gen <prompt>|<count>* – generate images (default 4)\n"
187
- "β€’ */summarize <text>*\n"
188
- "β€’ */translate <lang>|<text>*\n"
189
- "β€’ */joke*\n"
190
- "β€’ */weather <location>*\n"
191
- "β€’ */weatherpoem <location>*\n"
192
- "β€’ */inspire*\n"
193
- "β€’ */trivia* / *answer*\n"
194
- "β€’ */meme <text>*\n"
195
- "β€’ */poll <Q>|<opt1>|… / /results / /endpoll*\n\n"
196
- "Reply to one of my messages β†’ I'll LLM‑answer it.\n"
197
- "Any other text β†’ voice reply."
198
  )
199
 
200
  @app.post("/whatsapp")
@@ -207,205 +352,102 @@ async def whatsapp_webhook(request: Request):
207
  raise HTTPException(403, "Unauthorized")
208
 
209
  data = await request.json()
210
- logging.debug(f"Received data: {data}")
211
  chat_id = data.get("senderData", {}).get("chatId")
212
  if chat_id != BotConfig.BOT_GROUP_CHAT or data.get("typeWebhook") != "incomingMessageReceived":
213
  return {"success": True}
214
 
215
- md = data.get("messageData", {})
216
- mid = data["idMessage"]
 
 
 
 
 
217
 
218
- # --- Quoted-reply handling ---
219
  if md.get("typeMessage") == "quotedMessage":
220
- ext = md.get("extendedTextMessageData", {})
221
- quoted = md.get("quotedMessage", {})
222
- quoted_participant = ext.get("participant")
223
- # only if the quoted message was from the bot
224
- if quoted_participant == BotConfig.BOT_JID:
225
  user_reply = ext.get("text", "")
226
  quoted_text = quoted.get("textMessage", "")
227
  prompt = (
228
  f"You asked: {quoted_text}\n"
229
  f"User replied: {user_reply}\n"
230
- "Provide a helpful, concise follow‑up."
231
  )
232
- reply = generate_llm(prompt)
233
- client.send_message(mid, chat_id, reply)
234
- voice_prompt = f"Speak only this dialog: {reply}"
235
- task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
 
 
236
  return {"success": True}
237
 
238
- # --- Extract normal text + skip mentions ---
239
- text_data = md.get("textMessageData") or md.get("extendedTextMessageData")
240
- if not text_data:
241
- return {"success": True}
242
- body = text_data.get("textMessage", text_data.get("text", "")).strip()
243
- ctx = text_data.get("contextInfo", {})
244
  if ctx.get("mentionedJidList"):
245
  return {"success": True}
246
 
247
  low = body.lower()
248
 
249
- # --- COMMANDS ---
250
  if low == "/help":
251
- client.send_message(mid, chat_id, help_text)
252
- return {"success": True}
253
-
254
  if low.startswith("/summarize "):
255
- summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{body[11:].strip()}")
256
- client.send_message(mid, chat_id, summary)
257
- return {"success": True}
258
-
259
  if low.startswith("/translate "):
260
- part = body[11:]
261
- if "|" not in part:
262
- client.send_message(mid, chat_id, "Please use `/translate <language>|<text>`")
263
- else:
264
- lang, txt = part.split("|", 1)
265
- resp = generate_llm(f"Translate the following into {lang.strip()}:\n\n{txt.strip()}")
266
- client.send_message(mid, chat_id, resp)
267
- return {"success": True}
268
-
269
  if low == "/joke":
270
- try:
271
- joke = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
272
- client.send_message(mid, chat_id, f"{joke['setup']}\n\n{joke['punchline']}")
273
- except:
274
- client.send_message(mid, chat_id, generate_llm("Tell me a short, funny joke."))
275
- return {"success": True}
276
-
277
  if low.startswith("/weather "):
278
- loc = body[9:].strip().replace(" ", "+")
279
- try:
280
- raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
281
- prompt = (
282
- f"Convert this weather report into Celsius and craft a short, creative, "
283
- f"beautiful weather report with emojis:\n\n{raw}"
284
- )
285
- report = generate_llm(prompt)
286
- client.send_message(mid, chat_id, report)
287
- voice_prompt = f"Provide only the following weather report as speech: {report}"
288
- task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
289
- except:
290
- client.send_message(mid, chat_id, "Could not fetch weather.")
291
- return {"success": True}
292
-
293
  if low.startswith("/weatherpoem "):
294
- loc = body[13:].strip().replace(" ", "+")
295
- try:
296
- raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
297
- poem = generate_llm(f"Write a short, poetic weather summary in Celsius based on this:\n\n{raw}")
298
- client.send_message(mid, chat_id, poem)
299
- voice_prompt = f"Speak only this poetic weather summary: {poem}"
300
- task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
301
- except:
302
- client.send_message(mid, chat_id, "Could not fetch weather.")
303
- return {"success": True}
304
-
305
  if low == "/inspire":
306
- quote = generate_llm(f"Give me a short inspirational unique quote with seed {random.randint(0,1e6)}.")
307
- client.send_message(mid, chat_id, f"✨ {quote}")
308
- return {"success": True}
309
-
310
  if low == "/trivia":
311
- raw = generate_llm(
312
- f"Generate a unique trivia Q&A in JSON:\n"
313
- '{"question":"...","answer":"..."}'
314
- )
315
- try:
316
- obj = json.loads(raw.strip().strip("```json").strip("```"))
317
- trivia_store[chat_id] = obj
318
- client.send_message(mid, chat_id, f"❓ {obj['question']}\nReply `/answer` or `/answer your guess`.")
319
- except:
320
- client.send_message(mid, chat_id, "Failed to generate trivia.")
321
- return {"success": True}
322
-
323
  if low.startswith("/answer"):
324
- resp = body[7:].strip()
325
- if chat_id in trivia_store:
326
- qa = trivia_store.pop(chat_id)
327
- if resp:
328
- verdict = generate_llm(
329
- f"Q: {qa['question']}\nCorrect: {qa['answer']}\nUser: {resp}\nCorrect?"
330
- )
331
- client.send_message(mid, chat_id, verdict)
332
- else:
333
- client.send_message(mid, chat_id, f"💡 Answer: {qa['answer']}")
334
- else:
335
- client.send_message(mid, chat_id, "No active trivia. `/trivia` to start.")
336
- return {"success": True}
337
-
338
  if low.startswith("/meme "):
339
- client.send_message(mid, chat_id, "🎨 Generating your meme...")
340
- task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,"prompt":f"meme: {body[6:].strip()}"})
341
- return {"success": True}
342
-
343
  if low.startswith("/poll "):
344
- parts = body[6:].split("|")
345
- if len(parts) < 3:
346
- client.send_message(mid, chat_id, "Use `/poll Q|A|B`")
347
- else:
348
- q, *opts = [p.strip() for p in parts]
349
- polls[chat_id] = {"question":q,"options":opts,"votes":{i+1:0 for i in range(len(opts))},"voters":{}}
350
- text = f"πŸ“Š *Poll:* {q}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(opts))
351
- client.send_message(mid, chat_id, text)
352
- return {"success": True}
353
-
354
  if chat_id in polls and low.isdigit():
355
- n = int(low)
356
- poll = polls[chat_id]
357
- if 1 <= n <= len(poll["options"]):
358
- prev = poll["voters"].get(data["senderData"].get("sender"))
359
- if prev:
360
- poll["votes"][prev] -= 1
361
- poll["votes"][n] += 1
362
- poll["voters"][data["senderData"].get("sender")] = n
363
- client.send_message(mid, chat_id, f"✅ Voted for {poll['options'][n-1]}")
364
- return {"success": True}
365
-
366
  if low == "/results":
367
- if chat_id in polls:
368
- p = polls[chat_id]
369
- text = f"πŸ“Š *Results:* {p['question']}\n" + "\n".join(
370
- f"{i}. {o}: {p['votes'][i]}" for i,o in enumerate(p["options"],1)
371
- )
372
- client.send_message(mid, chat_id, text)
373
- else:
374
- client.send_message(mid, chat_id, "No active poll.")
375
- return {"success": True}
376
-
377
  if low == "/endpoll":
378
- if chat_id in polls:
379
- p = polls.pop(chat_id)
380
- text = f"πŸ“Š *Final Results:* {p['question']}\n" + "\n".join(
381
- f"{i}. {o}: {p['votes'][i]}" for i,o in enumerate(p["options"],1)
382
- )
383
- client.send_message(mid, chat_id, text)
384
- else:
385
- client.send_message(mid, chat_id, "No active poll.")
386
- return {"success": True}
387
-
388
  if low.startswith("/gen"):
389
- parts = body[4:].split("|", 1)
390
  prompt = parts[0].strip()
391
- count = BotConfig.DEFAULT_IMAGE_COUNT
392
- if len(parts) > 1 and parts[1].strip().isdigit():
393
- count = int(parts[1].strip())
394
- if not prompt:
395
- client.send_message(mid, chat_id, "Use `/gen <prompt>|<count>`")
396
- else:
397
- client.send_message(mid, chat_id, f"✨ Generating {count} image(s)...")
398
- task_queue.put({
399
- "type":"image","message_id":mid,"chat_id":chat_id,
400
- "prompt":prompt,"num_images":count
401
- })
402
  return {"success": True}
403
 
404
- # Fallback → voice reply
405
- reply = generate_llm(body)
406
- client.send_message(mid, chat_id, reply)
407
- voice_prompt = f"Speak only this dialog: {reply}"
408
- task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
 
 
 
 
 
 
 
 
 
 
 
409
  return {"success": True}
410
 
411
  @app.get("/", response_class=PlainTextResponse)
 
22
  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
23
  WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
24
  BOT_GROUP_CHAT = "[email protected]"
25
+ BOT_JID = os.getenv("BOT_JID") # your bot's own WhatsApp JID
26
  IMAGE_DIR = "/tmp/images"
27
  AUDIO_DIR = "/tmp/audio"
28
  DEFAULT_IMAGE_COUNT = 4
 
39
  ) if not getattr(cls, name)
40
  ]
41
  if missing:
42
+ raise ValueError(f"Missing env vars: {', '.join(missing)}")
43
 
44
  class BotClient:
45
  def __init__(self, cfg: BotConfig):
46
  self.cfg = cfg
47
  self.session = requests.Session()
48
+ logging.basicConfig(level=logging.DEBUG,
49
+ format="%(asctime)s [%(levelname)s] %(message)s")
50
 
51
  def send(self, endpoint: str, payload: dict, files=None, retries=3):
52
+ url = (f"{self.cfg.GREEN_API_URL}/waInstance"
53
+ f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
54
+ f"{self.cfg.GREEN_API_TOKEN}")
55
  for attempt in range(1, retries + 1):
56
  try:
57
  resp = self.session.post(
 
69
  return {"error": str(e)}
70
 
71
  def send_message(self, message_id: str, chat_id: str, text: str):
72
+ return self.send("sendMessage", {
73
+ "chatId": chat_id,
74
+ "message": text,
75
+ "quotedMessageId": message_id
76
+ })
77
 
78
  def send_message_to(self, chat_id: str, text: str):
79
+ return self.send("sendMessage", {
80
+ "chatId": chat_id,
81
+ "message": text
82
+ })
83
 
84
+ def send_media(self, message_id: str, chat_id: str, file_path: str,
85
+ caption: str, media_type: str):
86
  endpoint = "sendFileByUpload"
87
+ payload = {
88
+ "chatId": chat_id,
89
+ "caption": caption,
90
+ "quotedMessageId": message_id
91
+ }
92
  with open(file_path, "rb") as f:
93
  mime = "image/jpeg" if media_type == "image" else "audio/mpeg"
94
  files = [("file", (os.path.basename(file_path), f, mime))]
 
98
  BotConfig.validate()
99
  client = BotClient(BotConfig)
100
 
101
+ # --- Threading, Queues, Stores ---
102
 
103
  task_queue = queue.Queue()
104
  trivia_store = {}
 
124
  task = task_queue.get()
125
  try:
126
  if task["type"] == "image":
127
+ _fn_generate_images(task["message_id"],
128
+ task["chat_id"],
129
+ task["prompt"],
130
+ task.get("num_images", 1))
131
  elif task["type"] == "audio":
132
+ _fn_voice_reply(task["message_id"],
133
+ task["chat_id"],
134
+ task["prompt"])
135
  except Exception as e:
136
+ logging.error(f"Worker error {task}: {e}")
137
  finally:
138
  task_queue.task_done()
139
 
140
  for _ in range(4):
141
  threading.Thread(target=worker, daemon=True).start()
142
 
143
+ # --- Primitive “tool” functions ---
144
 
145
+ def _fn_summarize(message_id, chat_id, text):
146
+ summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{text}")
147
+ client.send_message(message_id, chat_id, summary)
148
+
149
+ def _fn_translate(message_id, chat_id, lang, text):
150
+ resp = generate_llm(f"Translate the following into {lang}:\n\n{text}")
151
+ client.send_message(message_id, chat_id, resp)
152
 
153
+ def _fn_joke(message_id, chat_id):
154
+ try:
155
+ j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
156
+ joke = f"{j['setup']}\n\n{j['punchline']}"
157
+ except:
158
+ joke = generate_llm("Tell me a short, funny joke.")
159
+ client.send_message(message_id, chat_id, joke)
160
+
161
+ def _fn_weather(message_id, chat_id, loc):
162
+ raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
163
+ report = generate_llm(
164
+ f"Convert this weather report into Celsius and craft a short, creative report:\n\n{raw}"
165
+ )
166
+ client.send_message(message_id, chat_id, report)
167
+ task_queue.put({
168
+ "type":"audio","message_id":message_id,"chat_id":chat_id,
169
+ "prompt":f"Speak only this weather report: {report}"
170
+ })
171
+
172
+ def _fn_weather_poem(message_id, chat_id, loc):
173
+ raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
174
+ poem = generate_llm(
175
+ f"Write a short, poetic weather summary in Celsius based on:\n\n{raw}"
176
+ )
177
+ client.send_message(message_id, chat_id, poem)
178
+ task_queue.put({
179
+ "type":"audio","message_id":message_id,"chat_id":chat_id,
180
+ "prompt":f"Speak only this poetic weather summary: {poem}"
181
+ })
182
+
183
+ def _fn_inspire(message_id, chat_id):
184
+ quote = generate_llm(f"Give me a short inspirational unique quote.")
185
+ client.send_message(message_id, chat_id, f"✨ {quote}")
186
+
187
+ def _fn_trivia(message_id, chat_id):
188
+ raw = generate_llm(
189
+ f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
190
+ )
191
  try:
192
+ obj = json.loads(raw.strip().strip("```json").strip("```"))
193
+ trivia_store[chat_id] = obj
194
+ client.send_message(
195
+ message_id, chat_id,
196
+ f"❓ {obj['question']}\nReply `/answer` or `/answer your guess`."
197
+ )
198
+ except:
199
+ client.send_message(message_id, chat_id, "Failed to generate trivia.")
200
+
201
+ def _fn_answer(message_id, chat_id, guess):
202
+ if chat_id not in trivia_store:
203
+ client.send_message(message_id, chat_id, "No active trivia. `/trivia` to start.")
204
+ return
205
+ qa = trivia_store.pop(chat_id)
206
+ if guess:
207
+ verdict = generate_llm(
208
+ f"Q: {qa['question']}\nCorrect: {qa['answer']}\nUser: {guess}\nCorrect?"
209
+ )
210
+ client.send_message(message_id, chat_id, verdict)
211
+ else:
212
+ client.send_message(message_id, chat_id, f"💡 Answer: {qa['answer']}")
213
+
214
+ def _fn_meme(message_id, chat_id, txt):
215
+ client.send_message(message_id, chat_id, "🎨 Generating your meme...")
216
+ task_queue.put({"type":"image","message_id":message_id,
217
+ "chat_id":chat_id,"prompt":f"meme: {txt}"})
218
+
219
+ def _fn_poll(message_id, chat_id, question, options):
220
+ votes = {i+1:0 for i in range(len(options))}
221
+ polls[chat_id] = {"question":question,"options":options,"votes":votes,"voters":{}}
222
+ text = f"πŸ“Š *Poll:* {question}\n" + "\n".join(
223
+ f"{i+1}. {o}" for i,o in enumerate(options)
224
+ )
225
+ client.send_message(message_id, chat_id, text)
226
+
227
+ def _fn_poll_vote(message_id, chat_id, voter, choice):
228
+ poll = polls.get(chat_id)
229
+ if not poll or choice < 1 or choice > len(poll["options"]):
230
+ return
231
+ prev = poll["voters"].get(voter)
232
+ if prev:
233
+ poll["votes"][prev] -= 1
234
+ poll["votes"][choice] += 1
235
+ poll["voters"][voter] = choice
236
+ client.send_message(message_id, chat_id,
237
+ f"βœ… Voted for {poll['options'][choice-1]}")
238
+
239
+ def _fn_poll_results(message_id, chat_id):
240
+ poll = polls.get(chat_id)
241
+ if not poll:
242
+ client.send_message(message_id, chat_id, "No active poll.")
243
+ return
244
+ text = f"πŸ“Š *Results:* {poll['question']}\n" + "\n".join(
245
+ f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
246
+ )
247
+ client.send_message(message_id, chat_id, text)
248
+
249
+ def _fn_poll_end(message_id, chat_id):
250
+ poll = polls.pop(chat_id, None)
251
+ if not poll:
252
+ client.send_message(message_id, chat_id, "No active poll.")
253
+ return
254
+ text = f"πŸ“Š *Final Results:* {poll['question']}\n" + "\n".join(
255
+ f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
256
+ )
257
+ client.send_message(message_id, chat_id, text)
258
+
259
+ def _fn_generate_images(message_id, chat_id, prompt, count=1):
260
+ for i in range(1, count+1):
261
  try:
262
  img, path, ret_prompt, url = generate_image(
263
  prompt, message_id, message_id, BotConfig.IMAGE_DIR
264
  )
265
+ formatted = "\n\n".join(f"_{p.strip()}_"
266
+ for p in ret_prompt.split("\n\n") if p.strip())
 
267
  caption = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
268
  client.send_media(message_id, chat_id, path, caption, media_type="image")
269
  os.remove(path)
270
  except Exception as e:
271
  logging.warning(f"Image {i}/{count} failed: {e}")
272
+ client.send_message(message_id, chat_id,
273
+ f"😒 Failed to generate image {i}/{count}.")
274
+
275
+ def _fn_voice_reply(message_id, chat_id, prompt):
276
+ result = generate_voice_reply(prompt,
277
+ model="openai-audio",
278
+ voice="coral",
279
+ audio_dir=BotConfig.AUDIO_DIR)
280
+ if result and result[0]:
281
+ audio_path, _ = result
282
+ client.send_media(message_id, chat_id, audio_path, "", media_type="audio")
283
+ os.remove(audio_path)
284
+ else:
285
+ # fallback to text
286
+ response = generate_llm(prompt)
287
+ client.send_message(message_id, chat_id, response)
288
+
289
+ # --- Intent router for fallback ---
290
+
291
+ FUNCTION_SCHEMA = {
292
+ "generate_image": {
293
+ "description": "Generate one or more images",
294
+ "params": ["prompt","count"]
295
+ },
296
+ "send_text": {
297
+ "description": "Send a plain text response",
298
+ "params": ["message"]
299
+ }
300
+ }
301
+
302
+ def route_intent(user_input: str):
303
+ """
304
+ Ask the LLM whether to call a function or just chat.
305
+ Expects a JSON response like:
306
+ {"action":"generate_image","prompt":"a sunset","count":2}
307
+ or
308
+ {"action":"send_text","message":"Here's my reply..."}
309
+ """
310
+ sys_prompt = (
311
+ "You are Eve. You can either chat normally or call one of these functions:\n"
312
+ + "\n".join(f"- {name}: {info['description']}"
313
+ for name,info in FUNCTION_SCHEMA.items())
314
+ + "\n\nIf the user wants an image generated, return JSON with "
315
+ "\"action\":\"generate_image\",\"prompt\":\"...\",\"count\":<int>.\n"
316
+ "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
317
+ "Do NOT wrap your response in any extra textβ€”only raw JSON."
318
  )
319
+ raw = generate_llm(f"{sys_prompt}\nUser: {user_input}")
320
+ try:
321
+ return json.loads(raw)
322
+ except:
323
+ # fallback: treat entire raw as chat
324
+ return {"action":"send_text","message":raw}
325
 
326
  # --- FastAPI App & Webhook ---
327
 
328
  app = FastAPI()
 
329
  help_text = (
330
+ "πŸ€– *Hi, I'm Eve!* Commands:\n"
331
+ "β€’ /help\n"
332
+ "β€’ /summarize <text>\n"
333
+ "β€’ /translate <lang>|<text>\n"
334
+ "β€’ /joke\n"
335
+ "β€’ /weather <loc>\n"
336
+ "β€’ /weatherpoem <loc>\n"
337
+ "β€’ /inspire\n"
338
+ "β€’ /trivia / /answer\n"
339
+ "β€’ /meme <text>\n"
340
+ "β€’ /poll <Q>|<opt1>|… / /results / /endpoll\n"
341
+ "β€’ /gen <prompt>|<count>\n"
342
+ "Otherwise I’ll chat or generate images for you!"
 
343
  )
344
 
345
  @app.post("/whatsapp")
 
352
  raise HTTPException(403, "Unauthorized")
353
 
354
  data = await request.json()
 
355
  chat_id = data.get("senderData", {}).get("chatId")
356
  if chat_id != BotConfig.BOT_GROUP_CHAT or data.get("typeWebhook") != "incomingMessageReceived":
357
  return {"success": True}
358
 
359
+ md = data["messageData"]
360
+ mid = data["idMessage"]
361
+ tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
362
+ if not tmd:
363
+ return {"success": True}
364
+ body = tmd.get("textMessage", tmd.get("text", "")).strip()
365
+ ctx = tmd.get("contextInfo", {})
366
 
367
+ # 1) Quoted-reply to bot
368
  if md.get("typeMessage") == "quotedMessage":
369
+ ext = md["extendedTextMessageData"]
370
+ quoted = md["quotedMessage"]
371
+ if ext.get("participant") == BotConfig.BOT_JID:
 
 
372
  user_reply = ext.get("text", "")
373
  quoted_text = quoted.get("textMessage", "")
374
  prompt = (
375
  f"You asked: {quoted_text}\n"
376
  f"User replied: {user_reply}\n"
377
+ "Provide a helpful follow‑up."
378
  )
379
+ ans = generate_llm(prompt)
380
+ client.send_message(mid, chat_id, ans)
381
+ task_queue.put({
382
+ "type":"audio","message_id":mid,
383
+ "chat_id":chat_id,"prompt":ans
384
+ })
385
  return {"success": True}
386
 
387
+ # 2) Mentions skip
388
  if ctx.get("mentionedJidList"):
389
  return {"success": True}
390
 
391
  low = body.lower()
392
 
393
+ # 3) Slash-commands
394
  if low == "/help":
395
+ client.send_message(mid, chat_id, help_text); return {"success": True}
396
  if low.startswith("/summarize "):
397
+ _fn_summarize(mid, chat_id, body[11:].strip()); return {"success": True}
398
  if low.startswith("/translate "):
399
+ lang, txt = body[11:].split("|",1)
400
+ _fn_translate(mid, chat_id, lang.strip(), txt.strip()); return {"success": True}
401
  if low == "/joke":
402
+ _fn_joke(mid, chat_id); return {"success": True}
403
  if low.startswith("/weather "):
404
+ _fn_weather(mid, chat_id, body[9:].strip().replace(" ","+")); return {"success": True}
405
  if low.startswith("/weatherpoem "):
406
+ _fn_weather_poem(mid, chat_id, body[13:].strip().replace(" ","+")); return {"success": True}
407
  if low == "/inspire":
408
+ _fn_inspire(mid, chat_id); return {"success": True}
409
  if low == "/trivia":
410
+ _fn_trivia(mid, chat_id); return {"success": True}
411
  if low.startswith("/answer"):
412
+ _fn_answer(mid, chat_id, body[7:].strip()); return {"success": True}
413
  if low.startswith("/meme "):
414
+ _fn_meme(mid, chat_id, body[6:].strip()); return {"success": True}
415
  if low.startswith("/poll "):
416
+ parts = [p.strip() for p in body[6:].split("|")]
417
+ _fn_poll(mid, chat_id, parts[0], parts[1:]); return {"success": True}
418
  if chat_id in polls and low.isdigit():
419
+ _fn_poll_vote(mid, chat_id,
420
+ data["senderData"]["sender"],
421
+ int(low)); return {"success": True}
422
  if low == "/results":
423
+ _fn_poll_results(mid, chat_id); return {"success": True}
424
  if low == "/endpoll":
425
+ _fn_poll_end(mid, chat_id); return {"success": True}
426
  if low.startswith("/gen"):
427
+ parts = body[4:].split("|",1)
428
  prompt = parts[0].strip()
429
+ cnt = int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
430
+ client.send_message(mid, chat_id, f"✨ Generating {cnt} image(s)…")
431
+ task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
432
+ "prompt":prompt,"num_images":cnt})
 
 
 
 
 
 
 
433
  return {"success": True}
434
 
435
+ # 4) Fallback → function calling router
436
+ intent = route_intent(body)
437
+ act = intent.get("action")
438
+ if act == "generate_image":
439
+ pr = intent.get("prompt","")
440
+ ct = intent.get("count",1)
441
+ client.send_message(mid, chat_id, f"👍 Generating {ct} images for “{pr}”…")
442
+ task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
443
+ "prompt":pr,"num_images":ct})
444
+ else:
445
+ # send_text or any unknown
446
+ msg = intent.get("message", "Sorry, I didn't understand.")
447
+ client.send_message(mid, chat_id, msg)
448
+ task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,
449
+ "prompt":msg})
450
+
451
  return {"success": True}
452
 
453
  @app.get("/", response_class=PlainTextResponse)
config.yaml CHANGED
@@ -19,9 +19,57 @@ config:
19
  • /results — show poll results
20
  • /endpoll — end the poll
21
  Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it.
22
- For any other message, respond via voice or text.
23
  char: Eve
24
25
  SD:
26
  steps: 30
27
  negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
 
19
  • /results — show poll results
20
  • /endpoll — end the poll
21
  Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it.
22
+ For any other message, you can either chat normally or invoke one of your tools.
23
  char: Eve
24
 
25
+ # Tell your LLM client to expose these functions
26
+ function_calling: auto
27
+
28
+ functions:
29
+ - name: generate_image
30
+ description: Generate one or more images from a prompt.
31
+ parameters:
32
+ type: object
33
+ properties:
34
+ prompt:
35
+ type: string
36
+ description: The text prompt to generate an image for
37
+ count:
38
+ type: integer
39
+ description: Number of images to generate
40
+ required:
41
+ - prompt
42
+
43
+ - name: send_text
44
+ description: Send a plain text reply back to the user.
45
+ parameters:
46
+ type: object
47
+ properties:
48
+ message:
49
+ type: string
50
+ description: The text content to send
51
+ required:
52
+ - message
53
+
54
+ bot:
55
+ default_image_count: 4
56
+ skip:
57
+ mentions: true
58
+ quotes: true
59
+
60
+ image:
61
+ model: flux
62
+ width: 1920
63
+ height: 1080
64
+ enhance: true
65
+ safe: false
66
+ nologo: true
67
+
68
+ voice:
69
+ model: openai-audio
70
+ voice: coral
71
+
72
+
73
  SD:
74
  steps: 30
75
  negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
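The new functions block is intended to be surfaced to the LLM client. A minimal sketch of reading it with PyYAML and building the tool list used in the router's system prompt; the nesting under the top-level config: key is an assumption based on the hunk header above, not something the diff states explicitly.

import yaml

with open("config.yaml") as fh:
    cfg = yaml.safe_load(fh)

# Assumed location: a list of {name, description, parameters} mappings.
functions = cfg["config"]["functions"]
tool_list = "\n".join(f"- {fn['name']}: {fn['description']}" for fn in functions)
print("You can either chat normally or call one of these functions:\n" + tool_list)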