Chandima Prabhath committed · commit 1ac3ddc · parent: 3ea99b4
Refactor image and voice reply generation functions for improved readability; enhance logging for better debugging and error handling.
app.py
CHANGED
@@ -14,7 +14,7 @@ from pydantic import BaseModel, Field, ValidationError
 
 from FLUX import generate_image
 from VoiceReply import generate_voice_reply
-from polLLM import generate_llm, LLMBadRequestError # …
+from polLLM import generate_llm, LLMBadRequestError # assume this exception is raised on 400
 
 # --- Logging Setup ---------------------------------------------------------
 
@@ -259,9 +259,15 @@ def _fn_poll_end(mid, cid):
     )
     _fn_send_text(mid, cid, txt)
 
-def _fn_generate_images(message_id: str, chat_id: str, prompt: str,
-    …
-    …
+def _fn_generate_images(
+    message_id: str,
+    chat_id: str,
+    prompt: str,
+    count: int = 1,
+    width: Optional[int] = None,
+    height: Optional[int] = None,
+    **_
+):
     _fn_send_accept(message_id, chat_id, f"✨ Generating {count} image(s)…")
     for i in range(1, count+1):
         try:
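Note: the refactored signature lists each parameter on its own line and keeps a trailing `**_`, so the function can be called with an intent's full set of fields and quietly ignore any it does not declare. A minimal sketch of that calling convention, with a hypothetical stand-in function rather than the real app.py helper:

    from typing import Optional

    # Hypothetical stand-in illustrating the trailing **_ pattern from the diff.
    def demo_generate_images(
        message_id: str,
        chat_id: str,
        prompt: str,
        count: int = 1,
        width: Optional[int] = None,
        height: Optional[int] = None,
        **_  # absorbs extra intent fields instead of raising TypeError
    ):
        for i in range(1, count + 1):
            print(f"[{chat_id}/{message_id}] image {i}/{count}: {prompt!r} ({width}x{height})")

    # Extra keyword arguments such as caption= are simply swallowed by **_.
    demo_generate_images("msg1", "chat1", prompt="a sunset", count=2, caption="ignored")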
@@ -277,7 +283,12 @@ def _fn_generate_images(message_id: str, chat_id: str, prompt: str,
             logger.warning(f"Img {i}/{count} failed: {e}")
             _fn_send_text(message_id, chat_id, f"😢 Failed to generate image {i}/{count}.")
 
-def _fn_voice_reply(…
+def _fn_voice_reply(
+    message_id: str,
+    chat_id: str,
+    prompt: str,
+    **_
+):
     proc = (
         f"Just say this exactly as written in a friendly, playful, "
         f"happy and helpful but a little bit clumsy-cute way: {prompt}"
@@ -398,15 +409,19 @@ def route_intent(user_input: str, chat_id: str, sender: str):
         clear_history(chat_id, sender)
         return SendTextIntent(action="send_text", message="Oops, I lost my train of thought—let’s start fresh!")
 
+    logger.debug(f"LLM raw response: {raw}")
+
     # 1) Strict: try each Pydantic model
     try:
         parsed = json.loads(raw)
+        logger.debug(f"Parsed JSON: {parsed}")
     except json.JSONDecodeError:
         return SendTextIntent(action="send_text", message=raw)
 
     for M in INTENT_MODELS:
         try:
-            intent = M.…
+            intent = M.model_validate(parsed)
+            logger.debug(f"Matched intent model: {M.__name__} with data {parsed}")
             return intent
         except ValidationError:
             continue
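The strict pass now validates the parsed JSON against each candidate model with Pydantic v2's `model_validate`, falling through on `ValidationError`. A self-contained sketch of that idea; the two models here are simplified stand-ins for the app's INTENT_MODELS, not its real definitions:

    import json
    from typing import Literal
    from pydantic import BaseModel, ValidationError

    class SendTextIntent(BaseModel):
        action: Literal["send_text"] = "send_text"
        message: str

    class GenerateImageIntent(BaseModel):
        action: Literal["generate_image"] = "generate_image"
        prompt: str
        count: int = 1

    INTENT_MODELS = [GenerateImageIntent, SendTextIntent]

    def parse_strict(raw: str) -> BaseModel:
        try:
            parsed = json.loads(raw)
        except json.JSONDecodeError:
            return SendTextIntent(message=raw)
        for M in INTENT_MODELS:
            try:
                return M.model_validate(parsed)  # raises ValidationError if the shape does not match
            except ValidationError:
                continue
        return SendTextIntent(message=raw)

    print(parse_strict('{"action": "generate_image", "prompt": "a sunset", "count": 2}'))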
@@ -442,9 +457,15 @@ def route_intent(user_input: str, chat_id: str, sender: str):
             kwargs["choice"] = int(data.get("choice",0))
         try:
             # coerce into Pydantic for uniform interface
-            model = next(
-                …
-                …
+            model = next(
+                m for m in INTENT_MODELS
+                if getattr(m, "__fields__", {}).get("action").default == action
+            )
+            intent = model.model_validate({"action":action, **kwargs})
+            logger.debug(f"Leniently matched intent model: {model.__name__} with kwargs {kwargs}")
+            return intent
+        except Exception as e:
+            logger.error(f"Lenient parsing into Pydantic failed: {e}")
             return SendTextIntent(action="send_text", message=raw)
 
     return SendTextIntent(action="send_text", message=raw)
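The lenient pass picks the model whose declared `action` default matches the action string pulled out of the loose JSON, then coerces the collected kwargs through `model_validate`. This relies on every intent model pinning `action` to a fixed default; `__fields__` is Pydantic v2's deprecated alias for `model_fields`, and a missing "action" field would raise an AttributeError that the broad `except Exception` swallows. A small sketch of the lookup, again with simplified stand-in models rather than the app's real definitions:

    from typing import Literal, Optional
    from pydantic import BaseModel

    class SendTextIntent(BaseModel):
        action: Literal["send_text"] = "send_text"
        message: str

    class VoiceReplyIntent(BaseModel):
        action: Literal["voice_reply"] = "voice_reply"
        prompt: str

    INTENT_MODELS = [SendTextIntent, VoiceReplyIntent]

    def coerce_lenient(action: str, **kwargs) -> Optional[BaseModel]:
        try:
            # model_fields maps field name -> FieldInfo; .default is the pinned action string.
            model = next(
                m for m in INTENT_MODELS
                if m.model_fields["action"].default == action
            )
            return model.model_validate({"action": action, **kwargs})
        except Exception:
            # next() with no match raises StopIteration; bad kwargs raise ValidationError.
            return None

    print(coerce_lenient("voice_reply", prompt="hello there"))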
@@ -476,7 +497,7 @@ async def whatsapp_webhook(request: Request):
     sender = data["senderData"]["sender"]
     mid = data["idMessage"]
     set_thread_context(chat_id, sender, mid)
-    logger.debug("Received webhook")
+    logger.debug(f"Received webhook for message {mid} from {sender}")
 
     if chat_id != BotConfig.BOT_GROUP_CHAT or data["typeWebhook"] != "incomingMessageReceived":
         return {"success": True}
@@ -488,8 +509,10 @@ async def whatsapp_webhook(request: Request):
 
     body = (tmd.get("textMessage") or tmd.get("text","")).strip()
     record_user_message(chat_id, sender, body)
+    logger.debug(f"User message: {body}")
 
     low = body.lower()
+    # Slash commands...
     if low == "/help":
         _fn_send_text(mid, chat_id, help_text)
         return {"success": True}
@@ -543,15 +566,34 @@ async def whatsapp_webhook(request: Request):
         })
         return {"success": True}
 
+    # Skip mentions
     if tmd.get("contextInfo", {}).get("mentionedJidList"):
         return {"success": True}
 
-    …
+    # Handle quoted replies to the bot
+    if md.get("typeMessage") == "quotedMessage":
+        ext = md["extendedTextMessageData"]
+        quoted = md["quotedMessage"]
+        if ext.get("participant") == BotConfig.BOT_JID:
+            effective = (
+                f"Quoted: {quoted.get('textMessage','')}\n"
+                f"User: {ext.get('text','')}"
+            )
+        else:
+            effective = body
+    else:
+        effective = body
+
+    # Route intent & dispatch
     intent = route_intent(effective, chat_id, sender)
+    logger.debug(f"Final intent: {intent}")
     handler = ACTION_HANDLERS.get(intent.action)
     if handler:
-        …
+        kwargs = intent.model_dump(exclude={"action"})
+        logger.debug(f"Dispatching action '{intent.action}' with args {kwargs}")
+        handler(mid, chat_id, **kwargs)
     else:
+        logger.warning(f"No handler for action '{intent.action}'")
         _fn_send_text(mid, chat_id, "Sorry, I didn't understand that.")
 
     return {"success": True}