Chandima Prabhath committed
Commit efc3973 · Parent(s): 9e02558
Refactor intent handling by removing unused actions and improving error handling for LLM requests; update comments for clarity.
app.py
CHANGED
@@ -4,7 +4,7 @@ import requests
 import logging
 import queue
 import json
-from typing import List, Optional
+from typing import List, Optional, Literal
 from collections import defaultdict, deque
 from concurrent.futures import ThreadPoolExecutor
 
@@ -273,7 +273,6 @@ class SendTextIntent(BaseIntent):
     action: Literal["send_text"]
     message: str
 
-# list of all intent models
 INTENT_MODELS = [
     JokeIntent, WeatherIntent,
     InspireIntent, GenerateImageIntent, SendTextIntent
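Background for the hunk above: the classes collected in INTENT_MODELS are Pydantic models whose `action` field is a single-value `Literal` (hence the `Literal` added to the typing import in the first hunk). Their bodies, apart from SendTextIntent, are outside this diff, so the following is only a rough sketch; `BaseIntent` being a plain `BaseModel` subclass and the exact fields of `GenerateImageIntent` are assumptions based on the kwargs the router extracts later.

from typing import Literal, Optional
from pydantic import BaseModel

class BaseIntent(BaseModel):
    # Assumed shared base for all intent models.
    action: str

class GenerateImageIntent(BaseIntent):
    # Giving the Literal field a default lets route_intent match models via
    # __fields__["action"].default (see the routing hunks further down); the
    # default here is an assumption about how the real models are declared.
    action: Literal["generate_image"] = "generate_image"
    prompt: str
    count: Optional[int] = None
    width: Optional[int] = None
    height: Optional[int] = None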
@@ -287,7 +286,7 @@ ACTION_HANDLERS = {
     "send_text": lambda mid,cid,**i: _fn_send_text(mid,cid,i["message"]),
 }
 
-# --- Intent Routing with
+# --- Intent Routing with 400‐Retry & No Spam ------------------------------
 
 def route_intent(user_input: str, chat_id: str, sender: str):
     history_text = get_history_text(chat_id, sender)
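The ACTION_HANDLERS entry above maps an action name to a lambda that forwards the validated intent fields to the concrete sender. The dispatch site itself is not part of this diff; a plausible sketch of how a routed intent would be executed follows, where `dispatch_intent` is a hypothetical helper and `mid`/`chat_id` mirror the lambda's first two parameters.

def dispatch_intent(intent, mid: str, chat_id: str):
    # Look up the handler registered for this intent's action and call it
    # with the model's fields as keyword arguments (Pydantic v1 .dict()).
    handler = ACTION_HANDLERS.get(intent.action)
    if handler is None:
        logger.warning(f"No handler for action {intent.action!r}")
        return
    handler(mid, chat_id, **intent.dict())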
@@ -304,16 +303,24 @@ def route_intent(user_input: str, chat_id: str, sender: str):
     )
     prompt = f"{sys_prompt}\nConversation so far:\n{history_text}\n\nUser: {user_input}"
 
+    # try primary LLM call
     try:
         raw = generate_llm(prompt)
     except LLMBadRequestError:
-        #
+        # on 400: clear history, retry once without context
         clear_history(chat_id, sender)
-
+        logger.warning("LLMBadRequestError—cleared history, retrying without context")
+        prompt_retry = f"{sys_prompt}\nUser: {user_input}"
+        try:
+            raw = generate_llm(prompt_retry)
+        except LLMBadRequestError:
+            logger.error("LLMBadRequestError on retry—aborting intent routing")
+            return SendTextIntent(action="send_text",
+                                  message="Sorry, I'm having trouble. Please try again in a moment.")
 
     logger.debug(f"LLM raw response: {raw}")
 
-    # 1) Strict:
+    # 1) Strict: Pydantic validation
     try:
         parsed = json.loads(raw)
         logger.debug(f"Parsed JSON: {parsed}")
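The retry added above hinges on generate_llm raising LLMBadRequestError when the provider answers with HTTP 400. That helper is defined elsewhere in app.py and is not shown here; a minimal self-contained sketch of such a guard looks roughly like this, with the endpoint URL, payload shape, and exception body being placeholders rather than the app's real values.

import requests

class LLMBadRequestError(Exception):
    """Raised when the LLM endpoint rejects the prompt with HTTP 400."""

def generate_llm(prompt: str) -> str:
    # Hypothetical endpoint and payload; app.py defines its own.
    resp = requests.post("https://llm.example.com/v1/generate",
                         json={"prompt": prompt}, timeout=60)
    if resp.status_code == 400:
        raise LLMBadRequestError(resp.text)
    resp.raise_for_status()
    return resp.json().get("text", "")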
@@ -337,28 +344,13 @@ def route_intent(user_input: str, chat_id: str, sender: str):
     kwargs = {}
     if action == "generate_image":
         kwargs["prompt"] = data.get("prompt","")
-        kwargs["count"]
-        kwargs["width"]
+        kwargs["count"] = int(data.get("count", BotConfig.DEFAULT_IMAGE_COUNT))
+        kwargs["width"] = data.get("width")
         kwargs["height"] = data.get("height")
     elif action == "send_text":
         kwargs["message"] = data.get("message","")
-
-        kwargs["lang"] = data.get("lang","")
-        kwargs["text"] = data.get("text","")
-    elif action == "summarize":
-        kwargs["text"] = data.get("text","")
-    elif action == "weather":
-        kwargs["location"] = data.get("location","")
-    elif action == "meme":
-        kwargs["text"] = data.get("text","")
-    elif action == "poll_create":
-        kwargs["question"] = data.get("question","")
-        kwargs["options"] = data.get("options",[])
-    elif action == "poll_vote":
-        kwargs["voter"] = sender
-        kwargs["choice"] = int(data.get("choice",0))
+    # ... add other parameter extractions if needed ...
     try:
-        # coerce into Pydantic for uniform interface
         model = next(
             m for m in INTENT_MODELS
             if getattr(m, "__fields__", {}).get("action").default == action
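In the hunk above, `model = next(...)` picks the intent class whose `action` Literal default equals the action string parsed from the LLM reply, and the surrounding try block then coerces the extracted kwargs through that model so every handler receives a validated object. Isolated for clarity (the `pick_intent_model` name is illustrative, and it assumes each model's `action` field declares a default):

def pick_intent_model(action: str, kwargs: dict):
    # Select the Pydantic model whose `action` field defaults to this action,
    # then validate kwargs through it; StopIteration and ValidationError are
    # left to the caller's try/except, as in route_intent.
    model = next(
        m for m in INTENT_MODELS
        if getattr(m, "__fields__", {}).get("action").default == action
    )
    return model(action=action, **kwargs)

# e.g. pick_intent_model("send_text", {"message": "hello"})
#  -> SendTextIntent(action='send_text', message='hello')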
@@ -427,7 +419,7 @@ async def whatsapp_webhook(request: Request):
         parts = body[4:].split("|")
         pr = parts[0].strip()
         ct = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
-        width
+        width = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else None
         height = int(parts[3]) if len(parts) > 3 and parts[3].isdigit() else None
         _fn_send_accept(mid, chat_id, f"✨ Generating {ct} image(s)…")
         task_queue.put({
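The final hunk restores the `width` parsing in the `/img` command handler, so `/img <prompt>|<count>|<width>|<height>` is parsed positionally with fallbacks for missing or non-numeric parts. The same logic pulled out as a standalone snippet for illustration; `parse_img_command` and the hard-coded default are not part of app.py.

DEFAULT_IMAGE_COUNT = 1  # stand-in for BotConfig.DEFAULT_IMAGE_COUNT

def parse_img_command(body: str):
    # body looks like "/img a sunset over the sea|2|512|512"
    parts = body[4:].split("|")
    prompt = parts[0].strip()
    count = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else DEFAULT_IMAGE_COUNT
    width = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else None
    height = int(parts[3]) if len(parts) > 3 and parts[3].isdigit() else None
    return prompt, count, width, height

print(parse_img_command("/img a sunset over the sea|2|512|512"))
# ('a sunset over the sea', 2, 512, 512)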