Chandima Prabhath committed
Commit d8d0078
1 Parent(s): a2104ab

Increase max_tokens for LLM generation to 8000; update example usage comment

Files changed (2)
  1. app.py +198 -254
  2. polLLM.py +3 -2
app.py CHANGED
@@ -22,22 +22,17 @@ class BotConfig:
  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
  WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
  BOT_GROUP_CHAT = "[email protected]"
- BOT_JID = os.getenv("BOT_JID") # your bot's own WhatsApp JID
  IMAGE_DIR = "/tmp/images"
  AUDIO_DIR = "/tmp/audio"
  DEFAULT_IMAGE_COUNT = 4

  @classmethod
  def validate(cls):
- missing = [
- name for name in (
- "GREEN_API_URL",
- "GREEN_API_TOKEN",
- "GREEN_API_ID_INSTANCE",
- "WEBHOOK_AUTH_TOKEN",
- "BOT_JID",
- ) if not getattr(cls, name)
- ]
  if missing:
  raise ValueError(f"Missing env vars: {', '.join(missing)}")
 
@@ -48,11 +43,11 @@ class BotClient:
  logging.basicConfig(level=logging.DEBUG,
  format="%(asctime)s [%(levelname)s] %(message)s")

- def send(self, endpoint: str, payload: dict, files=None, retries=3):
  url = (f"{self.cfg.GREEN_API_URL}/waInstance"
  f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
  f"{self.cfg.GREEN_API_TOKEN}")
- for attempt in range(1, retries + 1):
  try:
  resp = self.session.post(
  url,
@@ -63,355 +58,304 @@ class BotClient:
  resp.raise_for_status()
  return resp.json()
  except requests.RequestException as e:
- logging.warning(f"Attempt {attempt}/{retries} failed for {endpoint}: {e}")
- if attempt == retries:
- logging.error(f"{endpoint} ultimately failed: {e}")
- return {"error": str(e)}

- def send_message(self, message_id: str, chat_id: str, text: str):
  return self.send("sendMessage", {
  "chatId": chat_id,
  "message": text,
  "quotedMessageId": message_id
  })

- def send_message_to(self, chat_id: str, text: str):
  return self.send("sendMessage", {
  "chatId": chat_id,
  "message": text
  })

- def send_media(self, message_id: str, chat_id: str, file_path: str,
- caption: str, media_type: str):
  endpoint = "sendFileByUpload"
  payload = {
  "chatId": chat_id,
  "caption": caption,
  "quotedMessageId": message_id
  }
- with open(file_path, "rb") as f:
- mime = "image/jpeg" if media_type == "image" else "audio/mpeg"
- files = [("file", (os.path.basename(file_path), f, mime))]
- return self.send(endpoint, payload, files=files)

- # Validate env
  BotConfig.validate()
  client = BotClient(BotConfig)

- # --- Threading, Queues, Stores ---

- task_queue = queue.Queue()
- polls = {}
- last_message_time = time.time()

- def inactivity_monitor():
- global last_message_time
- while True:
- time.sleep(60)
- if time.time() - last_message_time >= 300:
- client.send_message_to(
- BotConfig.BOT_GROUP_CHAT,
- "⏰ I haven't heard from you in a while! I'm still here if you need anything."
- )
- last_message_time = time.time()
-
- #threading.Thread(target=inactivity_monitor, daemon=True).start()
-
- executor = ThreadPoolExecutor(max_workers=4)
  def worker():
  while True:
- task = task_queue.get()
  try:
- if task["type"] == "image":
- _fn_generate_images(task["message_id"],
- task["chat_id"],
- task["prompt"],
- task.get("num_images", 1))
- elif task["type"] == "audio":
- _fn_voice_reply(task["message_id"],
- task["chat_id"],
- task['prompt'])
  except Exception as e:
- logging.error(f"Worker error {task}: {e}")
  finally:
  task_queue.task_done()

  for _ in range(4):
- threading.Thread(target=worker, daemon=True).start()

- # --- Primitive “tool” functions ---

- def _fn_summarize(message_id, chat_id, text):
- summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{text}")
- client.send_message(message_id, chat_id, summary)

- def _fn_translate(message_id, chat_id, lang, text):
- resp = generate_llm(f"Translate the following into {lang}:\n\n{text}")
- client.send_message(message_id, chat_id, resp)

- def _fn_joke(message_id, chat_id):
  try:
- j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
  joke = f"{j['setup']}\n\n{j['punchline']}"
  except:
- joke = generate_llm("Tell me a short, funny joke.")
- client.send_message(message_id, chat_id, joke)

- def _fn_weather(message_id, chat_id, loc):
- raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
- report = generate_llm(
- f"Convert this weather report into Celsius and craft a short, creative report:\n\n{raw}"
- )
- client.send_message(message_id, chat_id, report)
- task_queue.put({
- "type":"audio","message_id":message_id,"chat_id":chat_id,
- "prompt":f"Speak only this weather report: {report}"
- })
-
- def _fn_inspire(message_id, chat_id):
- quote = generate_llm(f"Give me a short inspirational unique quote.")
- client.send_message(message_id, chat_id, f"✨ {quote}")
-
- def _fn_meme(message_id, chat_id, txt):
- client.send_message(message_id, chat_id, "🎨 Generating your meme...")
- task_queue.put({"type":"image","message_id":message_id,
- "chat_id":chat_id,"prompt":f"meme: {txt}"})
-
- def _fn_poll(message_id, chat_id, question, options):
  votes = {i+1:0 for i in range(len(options))}
- polls[chat_id] = {"question":question,"options":options,"votes":votes,"voters":{}}
- text = f"📊 *Poll:* {question}\n" + "\n".join(
- f"{i+1}. {o}" for i,o in enumerate(options)
- )
- client.send_message(message_id, chat_id, text)

- def _fn_poll_vote(message_id, chat_id, voter, choice):
- poll = polls.get(chat_id)
- if not poll or choice < 1 or choice > len(poll["options"]):
- return
  prev = poll["voters"].get(voter)
- if prev:
- poll["votes"][prev] -= 1
- poll["votes"][choice] += 1
- poll["voters"][voter] = choice
- client.send_message(message_id, chat_id,
- f"✅ Voted for {poll['options'][choice-1]}")
-
- def _fn_poll_results(message_id, chat_id):
- poll = polls.get(chat_id)
  if not poll:
- client.send_message(message_id, chat_id, "No active poll.")
  return
- text = f"📊 *Results:* {poll['question']}\n" + "\n".join(
  f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
  )
- client.send_message(message_id, chat_id, text)

- def _fn_poll_end(message_id, chat_id):
- poll = polls.pop(chat_id, None)
  if not poll:
- client.send_message(message_id, chat_id, "No active poll.")
  return
- text = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
  f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
  )
- client.send_message(message_id, chat_id, text)

- def _fn_generate_images(message_id, chat_id, prompt, count=1):
- for i in range(1, count+1):
  try:
- img, path, ret_prompt, url = generate_image(
- prompt, message_id, message_id, BotConfig.IMAGE_DIR
- )
- formatted = "\n\n".join(f"_{p.strip()}_"
- for p in ret_prompt.split("\n\n") if p.strip())
- caption = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
- client.send_media(message_id, chat_id, path, caption, media_type="image")
  os.remove(path)
  except Exception as e:
- logging.warning(f"Image {i}/{count} failed: {e}")
- client.send_message(message_id, chat_id,
- f"😢 Failed to generate image {i}/{count}.")
-
- def _fn_voice_reply(message_id, chat_id, prompt):
- processed_prompt = f"Say this dialog in a friendly tone:\n\n{prompt}"
- result = generate_voice_reply(processed_prompt,
- model="openai-audio",
- voice="coral",
- audio_dir=BotConfig.AUDIO_DIR)
- if result and result[0]:
- audio_path, _ = result
- client.send_media(message_id, chat_id, audio_path, "", media_type="audio")
- os.remove(audio_path)
  else:
- # fallback to text
- response = generate_llm(prompt)
- client.send_message(message_id, chat_id, response)

- # --- Intent router for fallback ---

  FUNCTION_SCHEMA = {
- "generate_image": {
- "description": "Generate one or more images",
- "params": ["prompt","count"]
- },
- "send_text": {
- "description": "Send a plain text response",
- "params": ["message"]
- }
  }

  def route_intent(user_input: str):
- """
- Ask the LLM whether to call a function or just chat.
- Expects a JSON response like:
- {"action":"generate_image","prompt":"a sunset","count":2}
- or
- {"action":"send_text","message":"Here's my reply..."}
- """
  sys_prompt = (
- "You are Eve. You can either chat normally or call one of these functions:\n"
- + "\n".join(f"- {name}: {info['description']}"
- for name,info in FUNCTION_SCHEMA.items())
- + "\n\nIf the user wants an image generated, return JSON with "
- "\"action\":\"generate_image\",\"prompt\":\"...\",\"count\":<int>.\n"
  "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
- "Do NOT wrap your response in any extra text—only raw JSON."
  )
  raw = generate_llm(f"{sys_prompt}\nUser: {user_input}")
  try:
  return json.loads(raw)
  except:
- # fallback: treat entire raw as chat
  return {"action":"send_text","message":raw}

- # --- FastAPI App & Webhook ---

  app = FastAPI()
  help_text = (
- "🤖 *Hi, I'm Eve!* Commands:\n"
  "• /help\n"
  "• /summarize <text>\n"
  "• /translate <lang>|<text>\n"
  "• /joke\n"
  "• /weather <loc>\n"
  "• /inspire\n"
  "• /meme <text>\n"
- "• /poll <Q>|<opt1>|… / /results / /endpoll\n"
  "• /gen <prompt>|<count>\n"
- "Otherwise I’ll chat or generate images for you!"
  )

  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
- global last_message_time
- last_message_time = time.time()
-
- # Auth
  if request.headers.get("Authorization") != f"Bearer {BotConfig.WEBHOOK_AUTH_TOKEN}":
- raise HTTPException(403, "Unauthorized")

- data = await request.json()
- chat_id = data.get("senderData", {}).get("chatId")
- if chat_id != BotConfig.BOT_GROUP_CHAT or data.get("typeWebhook") != "incomingMessageReceived":
- return {"success": True}

  md = data["messageData"]
  mid = data["idMessage"]
  tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
  if not tmd:
- return {"success": True}
- body = tmd.get("textMessage", tmd.get("text", "")).strip()
- ctx = tmd.get("contextInfo", {})
-
- # 1) Quoted‑reply to bot
- if md.get("typeMessage") == "quotedMessage":
- ext = md["extendedTextMessageData"]
- quoted = md["quotedMessage"]
- if ext.get("participant") == BotConfig.BOT_JID:
- user_reply = ext.get("text", "")
- quoted_text = quoted.get("textMessage", "")
- prompt = (
- f"You asked: {quoted_text}\n"
- f"User replied: {user_reply}\n"
- "Provide a helpful follow‑up."
- )
- ans = generate_llm(prompt)
- client.send_message(mid, chat_id, ans)
- task_queue.put({
- "type":"audio","message_id":mid,
- "chat_id":chat_id,"prompt":ans
- })
- return {"success": True}
-
- # 2) Mentions skip
- if ctx.get("mentionedJidList"):
- return {"success": True}

  low = body.lower()
-
- # 3) Slash‑commands
- if low == "/help":
- client.send_message(mid, chat_id, help_text); return {"success": True}
  if low.startswith("/summarize "):
- _fn_summarize(mid, chat_id, body[11:].strip()); return {"success": True}
  if low.startswith("/translate "):
- lang, txt = body[11:].split("|",1)
- _fn_translate(mid, chat_id, lang.strip(), txt.strip()); return {"success": True}
- if low == "/joke":
- _fn_joke(mid, chat_id); return {"success": True}
  if low.startswith("/weather "):
- _fn_weather(mid, chat_id, body[9:].strip().replace(" ","+")); return {"success": True}
- if low == "/inspire":
- _fn_inspire(mid, chat_id); return {"success": True}
  if low.startswith("/meme "):
- _fn_meme(mid, chat_id, body[6:].strip()); return {"success": True}
  if low.startswith("/poll "):
- parts = [p.strip() for p in body[6:].split("|")]
- _fn_poll(mid, chat_id, parts[0], parts[1:]); return {"success": True}
  if chat_id in polls and low.isdigit():
- _fn_poll_vote(mid, chat_id,
- data["senderData"]["sender"],
- int(low)); return {"success": True}
- if low == "/results":
- _fn_poll_results(mid, chat_id); return {"success": True}
- if low == "/endpoll":
- _fn_poll_end(mid, chat_id); return {"success": True}
  if low.startswith("/gen"):
- parts = body[4:].split("|",1)
- prompt = parts[0].strip()
- cnt = int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
- client.send_message(mid, chat_id, f"✨ Generating {cnt} image(s)…")
- task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
- "prompt":prompt,"num_images":cnt})
- return {"success": True}
-
- # 4) Fallback → function calling router
- intent = route_intent(body)
- act = intent.get("action")
- if act == "generate_image":
- pr = intent.get("prompt","")
- ct = intent.get("count",1)
- client.send_message(mid, chat_id, f"👍 Generating {ct} images for “{pr}”…")
- task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
- "prompt":pr,"num_images":ct})
  else:
- # send_text or any unknown
- msg = intent.get("message", "Sorry, I didn't understand.")
- client.send_message(mid, chat_id, msg)
- task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,
- "prompt":msg})

- return {"success": True}

- @app.get("/", response_class=PlainTextResponse)
  def index():
  return "Server is running!"

- if __name__ == "__main__":
- client.send_message_to(
- BotConfig.BOT_GROUP_CHAT,
- "🤖 *Eve is online!* Commands:\n" + help_text
- )
  import uvicorn
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
app.py after this commit:

  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
  WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
  BOT_GROUP_CHAT = "[email protected]"
+ BOT_JID = os.getenv("BOT_JID")
  IMAGE_DIR = "/tmp/images"
  AUDIO_DIR = "/tmp/audio"
  DEFAULT_IMAGE_COUNT = 4

  @classmethod
  def validate(cls):
+ missing = [n for n in (
+ "GREEN_API_URL","GREEN_API_TOKEN",
+ "GREEN_API_ID_INSTANCE","WEBHOOK_AUTH_TOKEN","BOT_JID"
+ ) if not getattr(cls, n)]
  if missing:
  raise ValueError(f"Missing env vars: {', '.join(missing)}")

  logging.basicConfig(level=logging.DEBUG,
  format="%(asctime)s [%(levelname)s] %(message)s")

+ def send(self, endpoint, payload, files=None, retries=3):
  url = (f"{self.cfg.GREEN_API_URL}/waInstance"
  f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
  f"{self.cfg.GREEN_API_TOKEN}")
+ for i in range(1, retries+1):
  try:
  resp = self.session.post(
  url,
  resp.raise_for_status()
  return resp.json()
  except requests.RequestException as e:
+ logging.warning(f"{endpoint} attempt {i}/{retries} failed: {e}")
+ return {"error":"failed"}

+ def send_message(self, message_id, chat_id, text):
  return self.send("sendMessage", {
  "chatId": chat_id,
  "message": text,
  "quotedMessageId": message_id
  })

+ def send_message_to(self, chat_id, text):
  return self.send("sendMessage", {
  "chatId": chat_id,
  "message": text
  })

+ def send_media(self, message_id, chat_id, file_path, caption, media_type):
  endpoint = "sendFileByUpload"
  payload = {
  "chatId": chat_id,
  "caption": caption,
  "quotedMessageId": message_id
  }
+ with open(file_path,"rb") as f:
+ mime = "image/jpeg" if media_type=="image" else "audio/mpeg"
+ files = [("file",(os.path.basename(file_path),f,mime))]
+ return self.send(endpoint,payload,files=files)

  BotConfig.validate()
  client = BotClient(BotConfig)

+ # --- Threading & Queues ---

+ task_queue = queue.Queue()
+ polls = {}
+ executor = ThreadPoolExecutor(max_workers=4)

  def worker():
  while True:
+ t = task_queue.get()
  try:
+ if t["type"]=="image":
+ _fn_generate_images(t["message_id"],t["chat_id"],t["prompt"],t.get("num_images",1))
+ elif t["type"]=="audio":
+ _fn_voice_reply(t["message_id"],t["chat_id"],t["prompt"])
  except Exception as e:
+ logging.error(f"Worker error {t}: {e}")
  finally:
  task_queue.task_done()

  for _ in range(4):
+ threading.Thread(target=worker,daemon=True).start()

+ # --- Tool Functions ---

+ def _fn_summarize(mid, cid, text):
+ s = generate_llm(f"Summarize:\n\n{text}")
+ client.send_message(mid,cid,s)

+ def _fn_translate(mid, cid, lang, text):
+ r = generate_llm(f"Translate to {lang}:\n\n{text}")
+ client.send_message(mid,cid,r)

+ def _fn_joke(mid, cid):
  try:
+ j = requests.get("https://official-joke-api.appspot.com/random_joke",timeout=5).json()
  joke = f"{j['setup']}\n\n{j['punchline']}"
  except:
+ joke = generate_llm("Tell me a short joke.")
+ client.send_message(mid,cid,joke)

+ def _fn_weather(mid,cid,loc):
+ raw = requests.get(f"http://sl.wttr.in/{loc}?format=4",timeout=5).text
+ r = generate_llm(f"Convert to °C & creative:\n\n{raw}")
+ client.send_message(mid,cid,r)
+ task_queue.put({"type":"audio","message_id":mid,"chat_id":cid,"prompt":r})
+
+ def _fn_inspire(mid,cid):
+ q = generate_llm("Give me a short inspirational quote.")
+ client.send_message(mid,cid,f" {q}")
+
+ def _fn_meme(mid,cid,txt):
+ client.send_message(mid,cid,"🎨 Generating meme…")
+ task_queue.put({"type":"image","message_id":mid,"chat_id":cid,"prompt":f"meme: {txt}"})
+
+ def _fn_poll_create(mid,cid,question,options):
  votes = {i+1:0 for i in range(len(options))}
+ polls[cid] = {"question":question,"options":options,"votes":votes,"voters":{}}
+ text = f"📊 *Poll:* {question}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(options))
+ client.send_message(mid,cid,text)

+ def _fn_poll_vote(mid,cid,voter,choice):
+ poll = polls.get(cid)
+ if not poll or choice<1 or choice>len(poll["options"]): return
  prev = poll["voters"].get(voter)
+ if prev: poll["votes"][prev]-=1
+ poll["votes"][choice]+=1
+ poll["voters"][voter]=choice
+ client.send_message(mid,cid,f"✅ Voted for {poll['options'][choice-1]}")
+
+ def _fn_poll_results(mid,cid):
+ poll = polls.get(cid)
  if not poll:
+ client.send_message(mid,cid,"No active poll.")
  return
+ txt = f"📊 *Results:* {poll['question']}\n" + "\n".join(
  f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
  )
+ client.send_message(mid,cid,txt)

+ def _fn_poll_end(mid,cid):
+ poll = polls.pop(cid,None)
  if not poll:
+ client.send_message(mid,cid,"No active poll.")
  return
+ txt = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
  f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
  )
+ client.send_message(mid,cid,txt)

+ def _fn_generate_images(mid,cid,prompt,count=1):
+ for i in range(1,count+1):
  try:
+ img,path,ret_p,url = generate_image(prompt,mid,mid,BotConfig.IMAGE_DIR)
+ formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
+ cap = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
+ client.send_media(mid,cid,path,cap,media_type="image")
  os.remove(path)
  except Exception as e:
+ logging.warning(f"Img {i}/{count} failed: {e}")
+ client.send_message(mid,cid,f"😢 Failed to gen image {i}/{count}.")
+
+ def _fn_send_text(mid,cid,message):
+ client.send_message(mid,cid,message)
+
+ def _fn_voice_reply(mid,cid,prompt):
+ res = generate_voice_reply(prompt,model="openai-audio",voice="coral",audio_dir=BotConfig.AUDIO_DIR)
+ if res and res[0]:
+ path,_ = res
+ client.send_media(mid,cid,path,"",media_type="audio")
+ os.remove(path)
  else:
+ txt = generate_llm(prompt)
+ client.send_message(mid,cid,txt)

+ # --- Function schema & router ---

  FUNCTION_SCHEMA = {
+ "summarize": {"description":"Summarize text","params":["text"]},
+ "translate": {"description":"Translate text","params":["lang","text"]},
+ "joke": {"description":"Tell a joke","params":[]},
+ "weather": {"description":"Creative weather","params":["location"]},
+ "weather_poem": {"description":"Poetic weather","params":["location"]},
+ "inspire": {"description":"Inspirational quote","params":[]},
+ "meme": {"description":"Generate meme","params":["text"]},
+ "poll_create": {"description":"Create poll","params":["question","options"]},
+ "poll_vote": {"description":"Vote poll","params":["choice"]},
+ "poll_results": {"description":"Show poll results","params":[]},
+ "poll_end": {"description":"End poll","params":[]},
+ "generate_image":{"description":"Generate images","params":["prompt","count"]},
+ "send_text": {"description":"Send plain text","params":["message"]}
  }

  def route_intent(user_input: str):
  sys_prompt = (
+ "You are Eve. You can either chat or call one of these functions:\n"
+ + "\n".join(f"- {n}: {f['description']}" for n,f in FUNCTION_SCHEMA.items())
+ + "\n\nTo call a function, return JSON with \"action\":\"<name>\", plus its parameters.\n"
  "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
+ "Return only raw JSON."
  )
  raw = generate_llm(f"{sys_prompt}\nUser: {user_input}")
  try:
  return json.loads(raw)
  except:
  return {"action":"send_text","message":raw}

+ # --- FastAPI & Webhook ---

  app = FastAPI()
  help_text = (
+ "🤖 *Eve* commands:\n"
  "• /help\n"
  "• /summarize <text>\n"
  "• /translate <lang>|<text>\n"
  "• /joke\n"
  "• /weather <loc>\n"
+ "• /weatherpoem <loc>\n"
  "• /inspire\n"
+ "• /trivia / /answer\n"
  "• /meme <text>\n"
+ "• /poll <Q>|… / /results / /endpoll\n"
  "• /gen <prompt>|<count>\n"
+ "Otherwise chat freely or reply to one of my messages to invoke tools."
  )

  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
+ data = await request.json()
  if request.headers.get("Authorization") != f"Bearer {BotConfig.WEBHOOK_AUTH_TOKEN}":
+ raise HTTPException(403,"Unauthorized")

+ chat_id = data["senderData"]["chatId"]
+ if chat_id != BotConfig.BOT_GROUP_CHAT or data["typeWebhook"]!="incomingMessageReceived":
+ return {"success":True}

  md = data["messageData"]
  mid = data["idMessage"]
  tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
  if not tmd:
+ return {"success":True}
+ body = (tmd.get("textMessage") or tmd.get("text","")).strip()
+ ctx = tmd.get("contextInfo",{})

+ # Slash commands
  low = body.lower()
+ if low=="/help":
+ client.send_message(mid,chat_id,help_text); return {"success":True}
  if low.startswith("/summarize "):
+ _fn_summarize(mid,chat_id,body[11:].strip()); return {"success":True}
  if low.startswith("/translate "):
+ lang,txt = body[11:].split("|",1)
+ _fn_translate(mid,chat_id,lang.strip(),txt.strip()); return {"success":True}
+ if low=="/joke":
+ _fn_joke(mid,chat_id); return {"success":True}
  if low.startswith("/weather "):
+ _fn_weather(mid,chat_id,body[9:].strip().replace(" ","+")); return {"success":True}
+ if low=="/inspire":
+ _fn_inspire(mid,chat_id); return {"success":True}
  if low.startswith("/meme "):
+ _fn_meme(mid,chat_id,body[6:].strip()); return {"success":True}
  if low.startswith("/poll "):
+ parts=[p.strip() for p in body[6:].split("|")]
+ _fn_poll_create(mid,chat_id,parts[0],parts[1:]); return {"success":True}
  if chat_id in polls and low.isdigit():
+ _fn_poll_vote(mid,chat_id,data["senderData"]["sender"],int(low)); return {"success":True}
+ if low=="/results":
+ _fn_poll_results(mid,chat_id); return {"success":True}
+ if low=="/endpoll":
+ _fn_poll_end(mid,chat_id); return {"success":True}
  if low.startswith("/gen"):
+ parts=body[4:].split("|",1)
+ pr=parts[0].strip()
+ ct=int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
+ client.send_message(mid,chat_id,f"✨ Generating {ct} images…")
+ task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,"prompt":pr,"num_images":ct})
+ return {"success":True}
+
+ # Skip mentions
+ if ctx.get("mentionedJidList"):
+ return {"success":True}
+
+ # Build effective_text (include quoted if replying to bot)
+ if md.get("typeMessage")=="quotedMessage":
+ ext=md["extendedTextMessageData"]
+ quoted=md["quotedMessage"]
+ if ext.get("participant")==BotConfig.BOT_JID:
+ effective = f"Quoted: {quoted.get('textMessage','')}\nUser: {ext.get('text','')}"
+ else:
+ effective = body
  else:
+ effective = body
+
+ # Route intent across all tools
+ intent = route_intent(effective)
+ action = intent.get("action")
+
+ dispatch = {
+ "summarize": lambda: _fn_summarize(mid,chat_id,intent["text"]),
+ "translate": lambda: _fn_translate(mid,chat_id,intent["lang"],intent["text"]),
+ "joke": lambda: _fn_joke(mid,chat_id),
+ "weather": lambda: _fn_weather(mid,chat_id,intent["location"]),
+ "inspire": lambda: _fn_inspire(mid,chat_id),
+ "meme": lambda: _fn_meme(mid,chat_id,intent["text"]),
+ "poll_create": lambda: _fn_poll_create(mid,chat_id,intent["question"],intent["options"]),
+ "poll_vote": lambda: _fn_poll_vote(mid,chat_id,data["senderData"]["sender"],intent["choice"]),
+ "poll_results": lambda: _fn_poll_results(mid,chat_id),
+ "poll_end": lambda: _fn_poll_end(mid,chat_id),
+ "generate_image":lambda: _fn_generate_images(mid,chat_id,intent["prompt"],intent.get("count",1)),
+ "send_text": lambda: _fn_send_text(mid,chat_id,intent["message"]),
+ }
+
+ if action in dispatch:
+ dispatch[action]()
+ else:
+ # fallback chat
+ txt = intent.get("message","Sorry, I didn't get that.")
+ _fn_send_text(mid,chat_id,txt)
+ task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":txt})

+ return {"success":True}

+ @app.get("/",response_class=PlainTextResponse)
  def index():
  return "Server is running!"

+ if __name__=="__main__":
+ client.send_message_to(BotConfig.BOT_GROUP_CHAT,
+ "🌟 Eve is online! Type /help to see commands.")
  import uvicorn
+ uvicorn.run(app,host="0.0.0.0",port=7860)
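The rewritten handler now funnels every non-command message through route_intent and the dispatch table above, so the LLM's reply has to be raw JSON whose keys match what each dispatch entry reads. A minimal sketch of that contract follows; only the action names and parameter keys come from FUNCTION_SCHEMA and dispatch in this diff, the values are made up for illustration:

    # Shapes route_intent is expected to hand back; values here are illustrative only.
    example_intents = [
        {"action": "generate_image", "prompt": "a sunset over the sea", "count": 2},
        {"action": "translate", "lang": "French", "text": "good morning"},
        {"action": "poll_create", "question": "Lunch?", "options": ["Rice", "Kottu"]},
        {"action": "send_text", "message": "Here's my reply..."},
    ]
    # A reply that fails json.loads is wrapped by route_intent as a send_text intent;
    # an action name that is not a dispatch key falls through to the chat fallback,
    # which also queues a voice reply on task_queue.

If the model wraps the JSON in extra prose, json.loads raises and the whole raw string is treated as a plain chat message, which is why the system prompt insists on raw JSON only.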
polLLM.py CHANGED
@@ -18,7 +18,7 @@ def pre_process():
  char = config['llm']['char']
  return system_prompt.replace("{char}", char)

- def generate_llm(prompt, model="openai-large", max_tokens=100):
  system_prompt = pre_process()

  try:
@@ -40,5 +40,6 @@ def generate_llm(prompt, model="openai-large", max_tokens=100):

  # Example usage (can be removed or commented out in production):
  if __name__ == "__main__":
- sample_prompt = f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
  print("Response:", generate_llm(sample_prompt))
 
polLLM.py after this commit:

  char = config['llm']['char']
  return system_prompt.replace("{char}", char)

+ def generate_llm(prompt, model="openai-large", max_tokens=8000):
  system_prompt = pre_process()

  try:

  # Example usage (can be removed or commented out in production):
  if __name__ == "__main__":
+ #sample_prompt = f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
+ sample_prompt = "search for free image generation api"
  print("Response:", generate_llm(sample_prompt))