Commit 9125c67 by Chandima Prabhath
Parent(s): 1ac3ddc
Refactor conversation history to limit stored messages to 10; remove unused poll functions and update command guidance in config for clarity and conciseness.
Files changed:
- app.py +3 -82
- config.yaml +63 -16
app.py
CHANGED
@@ -53,7 +53,7 @@ def get_thread_context():
 
 # --- Conversation History -------------------------------------------------
 
-history = defaultdict(lambda: deque(maxlen=
+history = defaultdict(lambda: deque(maxlen=5))
 
 def record_user_message(chat_id, sender, message):
     history[(chat_id, sender)].append(f"User: {message}")
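The refactor above pins each (chat_id, sender) pair's history to a fixed-size deque, so older turns fall off automatically. A minimal sketch of that behaviour, with `get_history_text` assumed to simply join the stored lines (its body is not part of this diff):

```python
from collections import defaultdict, deque

# Each (chat_id, sender) pair keeps only the 5 most recent lines;
# older entries are silently discarded by the deque.
history = defaultdict(lambda: deque(maxlen=5))

def record_user_message(chat_id, sender, message):
    history[(chat_id, sender)].append(f"User: {message}")

def get_history_text(chat_id, sender):
    # Assumed helper: join whatever is still inside the window.
    return "\n".join(history[(chat_id, sender)])

for i in range(8):
    record_user_message("chat1", "alice", f"message {i}")

print(get_history_text("chat1", "alice"))  # only messages 3-7 remain
```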
@@ -222,43 +222,6 @@ def _fn_meme(mid, cid, txt):
         "prompt": f"meme: {txt}"
     })
 
-def _fn_poll_create(mid, cid, question, options):
-    votes = {i+1:0 for i in range(len(options))}
-    polls[cid] = {"question": question, "options": options, "votes": votes, "voters": {}}
-    text = f"📊 *Poll:* {question}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(options))
-    _fn_send_text(mid, cid, text)
-
-def _fn_poll_vote(mid, cid, voter, choice):
-    poll = polls.get(cid)
-    if not poll or choice<1 or choice>len(poll["options"]):
-        return
-    prev = poll["voters"].get(voter)
-    if prev:
-        poll["votes"][prev] -= 1
-    poll["votes"][choice] += 1
-    poll["voters"][voter] = choice
-    _fn_send_text(mid, cid, f"✅ Voted for {poll['options'][choice-1]}")
-
-def _fn_poll_results(mid, cid):
-    poll = polls.get(cid)
-    if not poll:
-        _fn_send_text(mid, cid, "No active poll.")
-        return
-    txt = f"📊 *Results:* {poll['question']}\n" + "\n".join(
-        f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
-    )
-    _fn_send_text(mid, cid, txt)
-
-def _fn_poll_end(mid, cid):
-    poll = polls.pop(cid, None)
-    if not poll:
-        _fn_send_text(mid, cid, "No active poll.")
-        return
-    txt = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
-        f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
-    )
-    _fn_send_text(mid, cid, txt)
-
 def _fn_generate_images(
     message_id: str,
     chat_id: str,
@@ -358,22 +321,14 @@ class SendTextIntent(BaseIntent):
 
 # list of all intent models
 INTENT_MODELS = [
-
-    InspireIntent,
-    PollResultsIntent, PollEndIntent, GenerateImageIntent, SendTextIntent
+    JokeIntent, WeatherIntent,
+    InspireIntent, GenerateImageIntent, SendTextIntent
 ]
 
 ACTION_HANDLERS = {
-    "summarize": lambda mid,cid,**i: _fn_summarize(mid,cid,i["text"]),
-    "translate": lambda mid,cid,**i: _fn_translate(mid,cid,i["lang"],i["text"]),
     "joke": lambda mid,cid,**i: _fn_joke(mid,cid),
     "weather": lambda mid,cid,**i: _fn_weather(mid,cid,i["location"]),
     "inspire": lambda mid,cid,**i: _fn_inspire(mid,cid),
-    "meme": lambda mid,cid,**i: _fn_meme(mid,cid,i["text"]),
-    "poll_create": lambda mid,cid,**i: _fn_poll_create(mid,cid,i["question"],i["options"]),
-    "poll_vote": lambda mid,cid,**i: _fn_poll_vote(mid,cid,i["voter"],i["choice"]),
-    "poll_results": lambda mid,cid,**i: _fn_poll_results(mid,cid),
-    "poll_end": lambda mid,cid,**i: _fn_poll_end(mid,cid),
     "generate_image": _fn_generate_images,
     "send_text": lambda mid,cid,**i: _fn_send_text(mid,cid,i["message"]),
 }
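After this change the dispatch surface is just the five handlers above. As a hedged sketch of how a parsed intent dict flows through such a table; the handler body below is a stand-in, not the real `_fn_weather` from app.py:

```python
# Hypothetical illustration of the ACTION_HANDLERS dispatch pattern.
def _fn_weather(mid, cid, location):
    print(f"[{mid} -> {cid}] weather for {location}")  # stand-in handler

ACTION_HANDLERS = {
    "weather": lambda mid, cid, **i: _fn_weather(mid, cid, i["location"]),
}

def dispatch(mid, cid, intent: dict):
    # Pull the action name out, then pass the remaining fields as keyword args.
    action = intent.pop("action")
    handler = ACTION_HANDLERS.get(action)
    if handler is None:
        raise ValueError(f"unknown action: {action}")
    handler(mid, cid, **intent)

dispatch("msg-1", "chat-1", {"action": "weather", "location": "Colombo"})
```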
@@ -384,16 +339,9 @@ def route_intent(user_input: str, chat_id: str, sender: str):
     history_text = get_history_text(chat_id, sender)
     sys_prompt = (
         "You are Eve. You can either chat or call one of these functions:\n"
-        "- summarize(text)\n"
-        "- translate(lang, text)\n"
         "- joke()\n"
         "- weather(location)\n"
         "- inspire()\n"
-        "- meme(text)\n"
-        "- poll_create(question, options)\n"
-        "- poll_vote(voter, choice)\n"
-        "- poll_results()\n"
-        "- poll_end()\n"
         "- generate_image(prompt, count, width, height)\n"
         "- send_text(message)\n\n"
         "Return only raw JSON matching one of these shapes. For example:\n"
@@ -476,13 +424,9 @@ app = FastAPI()
 help_text = (
     "🤖 *Eve* commands:\n"
     "• /help\n"
-    "• /summarize <text>\n"
-    "• /translate <lang>|<text>\n"
     "• /joke\n"
     "• /weather <loc>\n"
     "• /inspire\n"
-    "• /meme <text>\n"
-    "• /poll <Q>|… / /results / /endpoll\n"
     "• /gen <prompt>|<count>|<width>|<height>\n"
     "Otherwise chat or reply to my message to invoke tools."
 )
@@ -516,13 +460,6 @@ async def whatsapp_webhook(request: Request):
     if low == "/help":
         _fn_send_text(mid, chat_id, help_text)
         return {"success": True}
-    if low.startswith("/summarize "):
-        _fn_summarize(mid, chat_id, body[11:].strip())
-        return {"success": True}
-    if low.startswith("/translate "):
-        lang, txt = body[11:].split("|", 1)
-        _fn_translate(mid, chat_id, lang.strip(), txt.strip())
-        return {"success": True}
     if low == "/joke":
         _fn_joke(mid, chat_id)
         return {"success": True}
@@ -532,22 +469,6 @@ async def whatsapp_webhook(request: Request):
     if low == "/inspire":
         _fn_inspire(mid, chat_id)
         return {"success": True}
-    if low.startswith("/meme "):
-        _fn_meme(mid, chat_id, body[6:].strip())
-        return {"success": True}
-    if low.startswith("/poll "):
-        parts = [p.strip() for p in body[6:].split("|")]
-        _fn_poll_create(mid, chat_id, parts[0], parts[1:])
-        return {"success": True}
-    if chat_id in polls and low.isdigit():
-        _fn_poll_vote(mid, chat_id, sender, int(low))
-        return {"success": True}
-    if low == "/results":
-        _fn_poll_results(mid, chat_id)
-        return {"success": True}
-    if low == "/endpoll":
-        _fn_poll_end(mid, chat_id)
-        return {"success": True}
     if low.startswith("/gen"):
         parts = body[4:].split("|")
         pr = parts[0].strip()
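The rest of the `/gen` handler falls outside this hunk. As a rough sketch only, the remaining fields could be parsed under the limits documented in config.yaml (count 1–10 with a default of 4, sizes restricted to the listed pairs); the default width/height below are assumptions, and app.py's actual logic may differ:

```python
# Hypothetical parse of "/gen <prompt>|<count>|<width>|<height>".
# Allowed sizes come from config.yaml; the 512x512 fallback is an assumption.
ALLOWED_SIZES = {(512, 512), (512, 1024), (1024, 512), (1024, 1024), (1704, 960)}

def parse_gen(body: str):
    parts = [p.strip() for p in body[4:].split("|")]
    prompt = parts[0]
    count = int(parts[1]) if len(parts) > 1 and parts[1] else 4
    width = int(parts[2]) if len(parts) > 2 and parts[2] else 512
    height = int(parts[3]) if len(parts) > 3 and parts[3] else 512
    if not (1 <= count <= 10) or (width, height) not in ALLOWED_SIZES:
        raise ValueError("⚠️ Invalid command. Correct usage: /gen <prompt>|<count:1–10>|<width>|<height>")
    return prompt, count, width, height

print(parse_gen("/gen a red fox|2|1024|1024"))  # ('a red fox', 2, 1024, 1024)
```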
config.yaml
CHANGED
@@ -1,20 +1,67 @@
 config:
   llm:
-    model: koboldcpp/HF_SPACE_Tiefighter-13B
     system_prompt: |-
       You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
-      You
-      … [old prompt lines 7–20 were removed; their content is not captured in this view]
+      **You MUST follow these rules exactly:**
+
+      1. **Supported commands** (only these—no others):
+         • `/help`
+           – List all available commands.
+
+         • `/gen <prompt>|<count>|<width>|<height>`
+           – Generate `<count>` images (integer 1–10, default 4).
+           – `<width>|<height>` must be one of:
+             - `512|512` (square)
+             - `512|1024` (portrait)
+             - `1024|512` (landscape)
+             - `1024|1024` (square)
+             - `1704|960` (wide landscape)
+
+         • `/joke`
+           – Tell a short, funny joke.
+
+         • `/weather <location>`
+           – Provide a concise, creative weather report in °C for `<location>`.
+
+         • `/inspire`
+           – Share a short, uplifting inspirational quote.
+
+      2. **Function‑calling**
+         You can either chat normally or invoke one of these functions:
+         ```json
+         // Generate one or more images
+         {
+           "action": "generate_image",
+           "prompt": "<string>",
+           "count": <int>,
+           "width": <int>,
+           "height": <int>
+         }
+
+         // Send plain text reply
+         {
+           "action": "send_text",
+           "message": "<string>"
+         }
+         ```
+         – **When calling**, return **only** the raw JSON object—no extra text or formatting.
+         – **Otherwise**, return a `send_text` JSON object.
+
+      3. **Parameter validation**
+         – If any parameter is missing, out of range, or not one of the allowed values, respond with:
+         ```
+         ⚠️ Invalid command. Correct usage:
+         /gen <prompt>|<count:1–10>|<width>|<height>
+         ```
+         – Do **not** execute malformed commands.
+
+      4. **Tone & format**
+         – Always use a concise, friendly tone.
+         – For valid commands or function calls, produce **only** the function JSON or the direct output—no extra commentary.
+         – For freeform chat, wrap your reply in a `send_text` JSON.
+
+      5. **No deviations**
+         – Do **not** invent new commands, functions, or behaviors.
+         – Do **not** wrap your responses in markdown or code fences—just plain JSON.
+
+  char: Eve
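To make rules 2 and 5 concrete from the consumer side, here is a small hedged sketch of how the bot could interpret the model's reply: bare JSON with an `action` key is dispatched, and anything else falls back to a plain `send_text`. This mirrors the prompt's contract, not necessarily the exact code in app.py:

```python
import json

def parse_model_reply(raw: str) -> dict:
    """Interpret the LLM reply under the rules above: bare JSON only,
    otherwise fall back to a plain send_text message."""
    try:
        intent = json.loads(raw)
        if isinstance(intent, dict) and "action" in intent:
            return intent
    except json.JSONDecodeError:
        pass
    # Rule 5 forbids code fences, but a defensive fallback keeps the chat flowing.
    return {"action": "send_text", "message": raw}

print(parse_model_reply('{"action": "generate_image", "prompt": "sunset", "count": 4, "width": 1024, "height": 512}'))
print(parse_model_reply("Hello there!"))
```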