Chandima Prabhath committed · Commit 3ad83d3 · 1 Parent(s): 8deb81b

Enhance configuration to support function calling for image generation and text replies; improve help text for user commands.

Files changed:
- app.py  +273 -231
- config.yaml  +49 -1
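
For orientation, the webhook handler changed in app.py below reads only a handful of fields from each incoming Green API notification. A minimal example payload, assembled solely from the fields that handler touches (all values here are invented):

    # Hypothetical incoming webhook body; only the fields read by the handler in app.py are shown.
    example_webhook = {
        "typeWebhook": "incomingMessageReceived",
        "idMessage": "ABC123",                                   # quoted back as message_id in replies
        "senderData": {"chatId": "<group-jid>", "sender": "<user-jid>"},
        "messageData": {
            "typeMessage": "textMessage",
            "textMessageData": {"textMessage": "/gen a red fox|2"},
        },
    }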
app.py
CHANGED
@@ -22,7 +22,7 @@ class BotConfig:
     GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
     WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
     BOT_GROUP_CHAT = "[email protected]"
-    BOT_JID = os.getenv("BOT_JID")
     IMAGE_DIR = "/tmp/images"
     AUDIO_DIR = "/tmp/audio"
     DEFAULT_IMAGE_COUNT = 4
@@ -39,16 +39,19 @@ class BotConfig:
         ) if not getattr(cls, name)
         ]
         if missing:
-            raise ValueError(f"

 class BotClient:
     def __init__(self, cfg: BotConfig):
         self.cfg = cfg
         self.session = requests.Session()
-        logging.basicConfig(level=logging.DEBUG,

     def send(self, endpoint: str, payload: dict, files=None, retries=3):
-        url = f"{self.cfg.GREEN_API_URL}/waInstance
         for attempt in range(1, retries + 1):
             try:
                 resp = self.session.post(
@@ -66,16 +69,26 @@ class BotClient:
             return {"error": str(e)}

     def send_message(self, message_id: str, chat_id: str, text: str):
-
-

     def send_message_to(self, chat_id: str, text: str):
-
-

-    def send_media(self, message_id: str, chat_id: str, file_path: str,
         endpoint = "sendFileByUpload"
-        payload = {
         with open(file_path, "rb") as f:
             mime = "image/jpeg" if media_type == "image" else "audio/mpeg"
             files = [("file", (os.path.basename(file_path), f, mime))]
@@ -85,7 +98,7 @@ class BotClient:
 BotConfig.validate()
 client = BotClient(BotConfig)

-# ---

 task_queue = queue.Queue()
 trivia_store = {}
@@ -111,90 +124,222 @@ def worker():
         task = task_queue.get()
         try:
             if task["type"] == "image":
-
             elif task["type"] == "audio":
-
         except Exception as e:
-            logging.error(f"
         finally:
             task_queue.task_done()

 for _ in range(4):
     threading.Thread(target=worker, daemon=True).start()

-# ---

-def
-
-
-
-
-
-

-def
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
     try:
         img, path, ret_prompt, url = generate_image(
             prompt, message_id, message_id, BotConfig.IMAGE_DIR
         )
-
-
-        formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_prompt.split("\n\n") if p.strip())
         caption = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
         client.send_media(message_id, chat_id, path, caption, media_type="image")
         os.remove(path)
     except Exception as e:
         logging.warning(f"Image {i}/{count} failed: {e}")
-        client.send_message(message_id, chat_id,
-
-
-
-
-
-
-
         )
-
-

 # --- FastAPI App & Webhook ---

 app = FastAPI()
-
 help_text = (
-    "🤖 *Hi
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "•
-    "
-    "Any other text → voice reply."
 )

 @app.post("/whatsapp")
@@ -207,205 +352,102 @@ async def whatsapp_webhook(request: Request):
         raise HTTPException(403, "Unauthorized")

     data = await request.json()
-    logging.debug(f"Received data: {data}")
     chat_id = data.get("senderData", {}).get("chatId")
     if chat_id != BotConfig.BOT_GROUP_CHAT or data.get("typeWebhook") != "incomingMessageReceived":
         return {"success": True}

-    md
-    mid

-    #
     if md.get("typeMessage") == "quotedMessage":
-        ext = md
-        quoted = md
-
-        # only if the quoted message was from the bot
-        if quoted_participant == BotConfig.BOT_JID:
             user_reply = ext.get("text", "")
             quoted_text = quoted.get("textMessage", "")
             prompt = (
                 f"You asked: {quoted_text}\n"
                 f"User replied: {user_reply}\n"
-                "Provide a helpful
             )
-
-            client.send_message(mid, chat_id,
-
-
         return {"success": True}

-    #
-    text_data = md.get("textMessageData") or md.get("extendedTextMessageData")
-    if not text_data:
-        return {"success": True}
-    body = text_data.get("textMessage", text_data.get("text", "")).strip()
-    ctx = text_data.get("contextInfo", {})
     if ctx.get("mentionedJidList"):
         return {"success": True}

     low = body.lower()

-    #
     if low == "/help":
-        client.send_message(mid, chat_id, help_text)
-        return {"success": True}
-
     if low.startswith("/summarize "):
-
-        client.send_message(mid, chat_id, summary)
-        return {"success": True}
-
     if low.startswith("/translate "):
-
-
-            client.send_message(mid, chat_id, "Please use `/translate <language>|<text>`")
-        else:
-            lang, txt = part.split("|", 1)
-            resp = generate_llm(f"Translate the following into {lang.strip()}:\n\n{txt.strip()}")
-            client.send_message(mid, chat_id, resp)
-        return {"success": True}
-
     if low == "/joke":
-
-            joke = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
-            client.send_message(mid, chat_id, f"{joke['setup']}\n\n{joke['punchline']}")
-        except:
-            client.send_message(mid, chat_id, generate_llm("Tell me a short, funny joke."))
-        return {"success": True}
-
     if low.startswith("/weather "):
-
-        try:
-            raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
-            prompt = (
-                f"Convert this weather report into Celsius and craft a short, creative, "
-                f"beautiful weather report with emojis:\n\n{raw}"
-            )
-            report = generate_llm(prompt)
-            client.send_message(mid, chat_id, report)
-            voice_prompt = f"Provide only the following weather report as speech: {report}"
-            task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
-        except:
-            client.send_message(mid, chat_id, "Could not fetch weather.")
-        return {"success": True}
-
     if low.startswith("/weatherpoem "):
-
-        try:
-            raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
-            poem = generate_llm(f"Write a short, poetic weather summary in Celsius based on this:\n\n{raw}")
-            client.send_message(mid, chat_id, poem)
-            voice_prompt = f"Speak only this poetic weather summary: {poem}"
-            task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,"prompt":voice_prompt})
-        except:
-            client.send_message(mid, chat_id, "Could not fetch weather.")
-        return {"success": True}
-
     if low == "/inspire":
-
-        client.send_message(mid, chat_id, f"✨ {quote}")
-        return {"success": True}
-
     if low == "/trivia":
-
-            f"Generate a unique trivia Q&A in JSON:\n"
-            '{"question":"...","answer":"..."}'
-        )
-        try:
-            obj = json.loads(raw.strip().strip("```json").strip("```"))
-            trivia_store[chat_id] = obj
-            client.send_message(mid, chat_id, f"❓ {obj['question']}\nReply `/answer` or `/answer your guess`.")
-        except:
-            client.send_message(mid, chat_id, "Failed to generate trivia.")
-        return {"success": True}
-
     if low.startswith("/answer"):
-
-        if chat_id in trivia_store:
-            qa = trivia_store.pop(chat_id)
-            if resp:
-                verdict = generate_llm(
-                    f"Q: {qa['question']}\nCorrect: {qa['answer']}\nUser: {resp}\nCorrect?"
-                )
-                client.send_message(mid, chat_id, verdict)
-            else:
-                client.send_message(mid, chat_id, f"💡 Answer: {qa['answer']}")
-        else:
-            client.send_message(mid, chat_id, "No active trivia. `/trivia` to start.")
-        return {"success": True}
-
     if low.startswith("/meme "):
-
-        task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,"prompt":f"meme: {body[6:].strip()}"})
-        return {"success": True}
-
     if low.startswith("/poll "):
-        parts = body[6:].split("|")
-
-            client.send_message(mid, chat_id, "Use `/poll Q|A|B`")
-        else:
-            q, *opts = [p.strip() for p in parts]
-            polls[chat_id] = {"question":q,"options":opts,"votes":{i+1:0 for i in range(len(opts))},"voters":{}}
-            text = f"π *Poll:* {q}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(opts))
-            client.send_message(mid, chat_id, text)
-        return {"success": True}
-
     if chat_id in polls and low.isdigit():
-
-
-
-        prev = poll["voters"].get(data["senderData"].get("sender"))
-        if prev:
-            poll["votes"][prev] -= 1
-        poll["votes"][n] += 1
-        poll["voters"][data["senderData"].get("sender")] = n
-        client.send_message(mid, chat_id, f"✅ Voted for {poll['options'][n-1]}")
-        return {"success": True}
-
     if low == "/results":
-
-            p = polls[chat_id]
-            text = f"π *Results:* {p['question']}\n" + "\n".join(
-                f"{i}. {o}: {p['votes'][i]}" for i,o in enumerate(p["options"],1)
-            )
-            client.send_message(mid, chat_id, text)
-        else:
-            client.send_message(mid, chat_id, "No active poll.")
-        return {"success": True}
-
     if low == "/endpoll":
-
-            p = polls.pop(chat_id)
-            text = f"π *Final Results:* {p['question']}\n" + "\n".join(
-                f"{i}. {o}: {p['votes'][i]}" for i,o in enumerate(p["options"],1)
-            )
-            client.send_message(mid, chat_id, text)
-        else:
-            client.send_message(mid, chat_id, "No active poll.")
-        return {"success": True}
-
     if low.startswith("/gen"):
-        parts = body[4:].split("|",
         prompt = parts[0].strip()
-
-
-
-
-            client.send_message(mid, chat_id, "Use `/gen <prompt>|<count>`")
-        else:
-            client.send_message(mid, chat_id, f"✨ Generating {count} image(s)...")
-            task_queue.put({
-                "type":"image","message_id":mid,"chat_id":chat_id,
-                "prompt":prompt,"num_images":count
-            })
         return {"success": True}

-    # Fallback →
-
-
-
-
     return {"success": True}

 @app.get("/", response_class=PlainTextResponse)

@@ -22,7 +22,7 @@ class BotConfig:
     GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
     WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
     BOT_GROUP_CHAT = "[email protected]"
+    BOT_JID = os.getenv("BOT_JID")  # your bot's own WhatsApp JID
     IMAGE_DIR = "/tmp/images"
     AUDIO_DIR = "/tmp/audio"
     DEFAULT_IMAGE_COUNT = 4
@@ -39,16 +39,19 @@ class BotConfig:
         ) if not getattr(cls, name)
         ]
         if missing:
+            raise ValueError(f"Missing env vars: {', '.join(missing)}")

 class BotClient:
     def __init__(self, cfg: BotConfig):
         self.cfg = cfg
         self.session = requests.Session()
+        logging.basicConfig(level=logging.DEBUG,
+                            format="%(asctime)s [%(levelname)s] %(message)s")

     def send(self, endpoint: str, payload: dict, files=None, retries=3):
+        url = (f"{self.cfg.GREEN_API_URL}/waInstance"
+               f"{self.cfg.GREEN_API_ID_INSTANCE}/{endpoint}/"
+               f"{self.cfg.GREEN_API_TOKEN}")
         for attempt in range(1, retries + 1):
             try:
                 resp = self.session.post(
@@ -66,16 +69,26 @@ class BotClient:
             return {"error": str(e)}

     def send_message(self, message_id: str, chat_id: str, text: str):
+        return self.send("sendMessage", {
+            "chatId": chat_id,
+            "message": text,
+            "quotedMessageId": message_id
+        })

     def send_message_to(self, chat_id: str, text: str):
+        return self.send("sendMessage", {
+            "chatId": chat_id,
+            "message": text
+        })

+    def send_media(self, message_id: str, chat_id: str, file_path: str,
+                   caption: str, media_type: str):
         endpoint = "sendFileByUpload"
+        payload = {
+            "chatId": chat_id,
+            "caption": caption,
+            "quotedMessageId": message_id
+        }
         with open(file_path, "rb") as f:
             mime = "image/jpeg" if media_type == "image" else "audio/mpeg"
             files = [("file", (os.path.basename(file_path), f, mime))]
@@ -85,7 +98,7 @@ class BotClient:
 BotConfig.validate()
 client = BotClient(BotConfig)

+# --- Threading, Queues, Stores ---

 task_queue = queue.Queue()
 trivia_store = {}
@@ -111,90 +124,222 @@ def worker():
         task = task_queue.get()
         try:
             if task["type"] == "image":
+                _fn_generate_images(task["message_id"],
+                                    task["chat_id"],
+                                    task["prompt"],
+                                    task.get("num_images", 1))
             elif task["type"] == "audio":
+                _fn_voice_reply(task["message_id"],
+                                task["chat_id"],
+                                task["prompt"])
         except Exception as e:
+            logging.error(f"Worker error {task}: {e}")
         finally:
             task_queue.task_done()

 for _ in range(4):
     threading.Thread(target=worker, daemon=True).start()

+# --- Primitive “tool” functions ---

+def _fn_summarize(message_id, chat_id, text):
+    summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{text}")
+    client.send_message(message_id, chat_id, summary)
+
+def _fn_translate(message_id, chat_id, lang, text):
+    resp = generate_llm(f"Translate the following into {lang}:\n\n{text}")
+    client.send_message(message_id, chat_id, resp)

+def _fn_joke(message_id, chat_id):
+    try:
+        j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
+        joke = f"{j['setup']}\n\n{j['punchline']}"
+    except:
+        joke = generate_llm("Tell me a short, funny joke.")
+    client.send_message(message_id, chat_id, joke)
+
+def _fn_weather(message_id, chat_id, loc):
+    raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
+    report = generate_llm(
+        f"Convert this weather report into Celsius and craft a short, creative report:\n\n{raw}"
+    )
+    client.send_message(message_id, chat_id, report)
+    task_queue.put({
+        "type":"audio","message_id":message_id,"chat_id":chat_id,
+        "prompt":f"Speak only this weather report: {report}"
+    })
+
+def _fn_weather_poem(message_id, chat_id, loc):
+    raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
+    poem = generate_llm(
+        f"Write a short, poetic weather summary in Celsius based on:\n\n{raw}"
+    )
+    client.send_message(message_id, chat_id, poem)
+    task_queue.put({
+        "type":"audio","message_id":message_id,"chat_id":chat_id,
+        "prompt":f"Speak only this poetic weather summary: {poem}"
+    })
+
+def _fn_inspire(message_id, chat_id):
+    quote = generate_llm(f"Give me a short inspirational unique quote.")
+    client.send_message(message_id, chat_id, f"✨ {quote}")
+
+def _fn_trivia(message_id, chat_id):
+    raw = generate_llm(
+        f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
+    )
     try:
+        obj = json.loads(raw.strip().strip("```json").strip("```"))
+        trivia_store[chat_id] = obj
+        client.send_message(
+            message_id, chat_id,
+            f"❓ {obj['question']}\nReply `/answer` or `/answer your guess`."
+        )
+    except:
+        client.send_message(message_id, chat_id, "Failed to generate trivia.")
+
+def _fn_answer(message_id, chat_id, guess):
+    if chat_id not in trivia_store:
+        client.send_message(message_id, chat_id, "No active trivia. `/trivia` to start.")
+        return
+    qa = trivia_store.pop(chat_id)
+    if guess:
+        verdict = generate_llm(
+            f"Q: {qa['question']}\nCorrect: {qa['answer']}\nUser: {guess}\nCorrect?"
+        )
+        client.send_message(message_id, chat_id, verdict)
+    else:
+        client.send_message(message_id, chat_id, f"💡 Answer: {qa['answer']}")
+
+def _fn_meme(message_id, chat_id, txt):
+    client.send_message(message_id, chat_id, "🎨 Generating your meme...")
+    task_queue.put({"type":"image","message_id":message_id,
+                    "chat_id":chat_id,"prompt":f"meme: {txt}"})
+
+def _fn_poll(message_id, chat_id, question, options):
+    votes = {i+1:0 for i in range(len(options))}
+    polls[chat_id] = {"question":question,"options":options,"votes":votes,"voters":{}}
+    text = f"π *Poll:* {question}\n" + "\n".join(
+        f"{i+1}. {o}" for i,o in enumerate(options)
+    )
+    client.send_message(message_id, chat_id, text)
+
+def _fn_poll_vote(message_id, chat_id, voter, choice):
+    poll = polls.get(chat_id)
+    if not poll or choice < 1 or choice > len(poll["options"]):
+        return
+    prev = poll["voters"].get(voter)
+    if prev:
+        poll["votes"][prev] -= 1
+    poll["votes"][choice] += 1
+    poll["voters"][voter] = choice
+    client.send_message(message_id, chat_id,
+                        f"✅ Voted for {poll['options'][choice-1]}")
+
+def _fn_poll_results(message_id, chat_id):
+    poll = polls.get(chat_id)
+    if not poll:
+        client.send_message(message_id, chat_id, "No active poll.")
+        return
+    text = f"π *Results:* {poll['question']}\n" + "\n".join(
+        f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
+    )
+    client.send_message(message_id, chat_id, text)
+
+def _fn_poll_end(message_id, chat_id):
+    poll = polls.pop(chat_id, None)
+    if not poll:
+        client.send_message(message_id, chat_id, "No active poll.")
+        return
+    text = f"π *Final Results:* {poll['question']}\n" + "\n".join(
+        f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
+    )
+    client.send_message(message_id, chat_id, text)
+
+def _fn_generate_images(message_id, chat_id, prompt, count=1):
+    for i in range(1, count+1):
         try:
             img, path, ret_prompt, url = generate_image(
                 prompt, message_id, message_id, BotConfig.IMAGE_DIR
             )
+            formatted = "\n\n".join(f"_{p.strip()}_"
+                                    for p in ret_prompt.split("\n\n") if p.strip())
             caption = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
             client.send_media(message_id, chat_id, path, caption, media_type="image")
             os.remove(path)
         except Exception as e:
             logging.warning(f"Image {i}/{count} failed: {e}")
+            client.send_message(message_id, chat_id,
+                                f"π’ Failed to generate image {i}/{count}.")
+
+def _fn_voice_reply(message_id, chat_id, prompt):
+    result = generate_voice_reply(prompt,
+                                  model="openai-audio",
+                                  voice="coral",
+                                  audio_dir=BotConfig.AUDIO_DIR)
+    if result and result[0]:
+        audio_path, _ = result
+        client.send_media(message_id, chat_id, audio_path, "", media_type="audio")
+        os.remove(audio_path)
+    else:
+        # fallback to text
+        response = generate_llm(prompt)
+        client.send_message(message_id, chat_id, response)
+
+# --- Intent router for fallback ---
+
+FUNCTION_SCHEMA = {
+    "generate_image": {
+        "description": "Generate one or more images",
+        "params": ["prompt","count"]
+    },
+    "send_text": {
+        "description": "Send a plain text response",
+        "params": ["message"]
+    }
+}
+
+def route_intent(user_input: str):
+    """
+    Ask the LLM whether to call a function or just chat.
+    Expects a JSON response like:
+    {"action":"generate_image","prompt":"a sunset","count":2}
+    or
+    {"action":"send_text","message":"Here's my reply..."}
+    """
+    sys_prompt = (
+        "You are Eve. You can either chat normally or call one of these functions:\n"
+        + "\n".join(f"- {name}: {info['description']}"
+                    for name,info in FUNCTION_SCHEMA.items())
+        + "\n\nIf the user wants an image generated, return JSON with "
+        "\"action\":\"generate_image\",\"prompt\":\"...\",\"count\":<int>.\n"
+        "Otherwise return JSON with \"action\":\"send_text\",\"message\":\"...\".\n"
+        "Do NOT wrap your response in any extra text—only raw JSON."
     )
+    raw = generate_llm(f"{sys_prompt}\nUser: {user_input}")
+    try:
+        return json.loads(raw)
+    except:
+        # fallback: treat entire raw as chat
+        return {"action":"send_text","message":raw}

 # --- FastAPI App & Webhook ---

 app = FastAPI()
 help_text = (
+    "🤖 *Hi, I'm Eve!* Commands:\n"
+    "• /help\n"
+    "• /summarize <text>\n"
+    "• /translate <lang>|<text>\n"
+    "• /joke\n"
+    "• /weather <loc>\n"
+    "• /weatherpoem <loc>\n"
+    "• /inspire\n"
+    "• /trivia / /answer\n"
+    "• /meme <text>\n"
+    "• /poll <Q>|<opt1>|… / /results / /endpoll\n"
+    "• /gen <prompt>|<count>\n"
+    "Otherwise I'll chat or generate images for you!"
 )

 @app.post("/whatsapp")
@@ -207,205 +352,102 @@ async def whatsapp_webhook(request: Request):
         raise HTTPException(403, "Unauthorized")

     data = await request.json()
     chat_id = data.get("senderData", {}).get("chatId")
     if chat_id != BotConfig.BOT_GROUP_CHAT or data.get("typeWebhook") != "incomingMessageReceived":
         return {"success": True}

+    md = data["messageData"]
+    mid = data["idMessage"]
+    tmd = md.get("textMessageData") or md.get("extendedTextMessageData")
+    if not tmd:
+        return {"success": True}
+    body = tmd.get("textMessage", tmd.get("text", "")).strip()
+    ctx = tmd.get("contextInfo", {})

+    # 1) Quoted-reply to bot
     if md.get("typeMessage") == "quotedMessage":
+        ext = md["extendedTextMessageData"]
+        quoted = md["quotedMessage"]
+        if ext.get("participant") == BotConfig.BOT_JID:
             user_reply = ext.get("text", "")
             quoted_text = quoted.get("textMessage", "")
             prompt = (
                 f"You asked: {quoted_text}\n"
                 f"User replied: {user_reply}\n"
+                "Provide a helpful follow-up."
             )
+            ans = generate_llm(prompt)
+            client.send_message(mid, chat_id, ans)
+            task_queue.put({
+                "type":"audio","message_id":mid,
+                "chat_id":chat_id,"prompt":ans
+            })
         return {"success": True}

+    # 2) Mentions skip
     if ctx.get("mentionedJidList"):
         return {"success": True}

     low = body.lower()

+    # 3) Slash-commands
     if low == "/help":
+        client.send_message(mid, chat_id, help_text); return {"success": True}
     if low.startswith("/summarize "):
+        _fn_summarize(mid, chat_id, body[11:].strip()); return {"success": True}
     if low.startswith("/translate "):
+        lang, txt = body[11:].split("|",1)
+        _fn_translate(mid, chat_id, lang.strip(), txt.strip()); return {"success": True}
     if low == "/joke":
+        _fn_joke(mid, chat_id); return {"success": True}
     if low.startswith("/weather "):
+        _fn_weather(mid, chat_id, body[9:].strip().replace(" ","+")); return {"success": True}
     if low.startswith("/weatherpoem "):
+        _fn_weather_poem(mid, chat_id, body[13:].strip().replace(" ","+")); return {"success": True}
     if low == "/inspire":
+        _fn_inspire(mid, chat_id); return {"success": True}
     if low == "/trivia":
+        _fn_trivia(mid, chat_id); return {"success": True}
     if low.startswith("/answer"):
+        _fn_answer(mid, chat_id, body[7:].strip()); return {"success": True}
     if low.startswith("/meme "):
+        _fn_meme(mid, chat_id, body[6:].strip()); return {"success": True}
     if low.startswith("/poll "):
+        parts = [p.strip() for p in body[6:].split("|")]
+        _fn_poll(mid, chat_id, parts[0], parts[1:]); return {"success": True}
     if chat_id in polls and low.isdigit():
+        _fn_poll_vote(mid, chat_id,
+                      data["senderData"]["sender"],
+                      int(low)); return {"success": True}
     if low == "/results":
+        _fn_poll_results(mid, chat_id); return {"success": True}
     if low == "/endpoll":
+        _fn_poll_end(mid, chat_id); return {"success": True}
     if low.startswith("/gen"):
+        parts = body[4:].split("|",1)
         prompt = parts[0].strip()
+        cnt = int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
+        client.send_message(mid, chat_id, f"✨ Generating {cnt} image(s)…")
+        task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
+                        "prompt":prompt,"num_images":cnt})
         return {"success": True}

+    # 4) Fallback → function calling router
+    intent = route_intent(body)
+    act = intent.get("action")
+    if act == "generate_image":
+        pr = intent.get("prompt","")
+        ct = intent.get("count",1)
+        client.send_message(mid, chat_id, f"π Generating {ct} images for “{pr}”…")
+        task_queue.put({"type":"image","message_id":mid,"chat_id":chat_id,
+                        "prompt":pr,"num_images":ct})
+    else:
+        # send_text or any unknown
+        msg = intent.get("message", "Sorry, I didn't understand.")
+        client.send_message(mid, chat_id, msg)
+        task_queue.put({"type":"audio","message_id":mid,"chat_id":chat_id,
+                        "prompt":msg})
+
     return {"success": True}

 @app.get("/", response_class=PlainTextResponse)
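
The fallback added above relies on route_intent returning a dict with an "action" key and degrading to a plain send_text when the model's output is not valid JSON. A minimal standalone sketch of that contract, with a stubbed LLM call (the stub names below are illustrative and not part of the commit):

    import json

    def stub_llm(prompt: str) -> str:
        # Pretend the model chose the image tool; return any non-JSON string to exercise the fallback.
        return json.dumps({"action": "generate_image", "prompt": "a sunset over the sea", "count": 2})

    def route_intent_sketch(user_input: str, llm=stub_llm) -> dict:
        raw = llm(f"<system prompt>\nUser: {user_input}")
        try:
            return json.loads(raw)                          # well-formed tool call
        except json.JSONDecodeError:
            return {"action": "send_text", "message": raw}  # same fallback shape as the commit

    intent = route_intent_sketch("draw me a sunset")
    if intent.get("action") == "generate_image":
        print("queue image task:", intent.get("prompt", ""), intent.get("count", 1))
    else:
        print("plain text reply:", intent.get("message"))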
config.yaml
CHANGED
@@ -19,9 +19,57 @@ config:
     • /results → show poll results
     • /endpoll → end the poll
     Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it.
-    For any other message,
+    For any other message, you can either chat normally or invoke one of your tools.
   char: Eve

+  # Tell your LLM client to expose these functions
+  function_calling: auto
+
+  functions:
+    - name: generate_image
+      description: Generate one or more images from a prompt.
+      parameters:
+        type: object
+        properties:
+          prompt:
+            type: string
+            description: The text prompt to generate an image for
+          count:
+            type: integer
+            description: Number of images to generate
+        required:
+          - prompt
+
+    - name: send_text
+      description: Send a plain text reply back to the user.
+      parameters:
+        type: object
+        properties:
+          message:
+            type: string
+            description: The text content to send
+        required:
+          - message
+
+  bot:
+    default_image_count: 4
+    skip:
+      mentions: true
+      quotes: true
+
+  image:
+    model: flux
+    width: 1920
+    height: 1080
+    enhance: true
+    safe: false
+    nologo: true
+
+  voice:
+    model: openai-audio
+    voice: coral
+
+
   SD:
     steps: 30
     negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
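
The commit does not show how the new functions: block is consumed, so the snippet below is only one plausible reading: loading config.yaml and reshaping each entry into an OpenAI-style tools array. The file path and the top-level key layout are assumptions, not part of this commit.

    # Hypothetical consumer of the functions: block added above; not part of this commit.
    import yaml

    with open("config.yaml") as fh:
        doc = yaml.safe_load(fh)

    # The entries may sit at the top level or under the existing `config:` mapping.
    functions = doc.get("functions") or doc.get("config", {}).get("functions", [])

    tools = [
        {
            "type": "function",
            "function": {
                "name": fn["name"],
                "description": fn["description"],
                "parameters": fn["parameters"],  # roughly JSON-Schema shaped in the YAML above
            },
        }
        for fn in functions
    ]
    print(tools)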