Spaces:
Running
Running
Chandima Prabhath
committed on
Commit
·
b9cdf7a
1
Parent(s):
b560795
Add health check endpoint and improve logging context; update requirements for telegram bot support
Browse files- app.py +7 -1
- requirements.txt +2 -1
- telebot.py +496 -0
app.py
CHANGED
@@ -9,7 +9,7 @@ from collections import defaultdict, deque
|
|
9 |
from concurrent.futures import ThreadPoolExecutor
|
10 |
|
11 |
from fastapi import FastAPI, Request, HTTPException
|
12 |
-
from fastapi.responses import PlainTextResponse
|
13 |
from pydantic import BaseModel, Field, ValidationError
|
14 |
|
15 |
from FLUX import generate_image
|
@@ -609,6 +609,12 @@ async def whatsapp_webhook(request: Request):
|
|
609 |
def index():
|
610 |
return "Server is running!"
|
611 |
|
|
|
|
|
|
|
|
|
|
|
|
|
612 |
if __name__ == "__main__":
|
613 |
client.send_message_to(
|
614 |
BotConfig.BOT_GROUP_CHAT,
|
|
|
9 |
from concurrent.futures import ThreadPoolExecutor
|
10 |
|
11 |
from fastapi import FastAPI, Request, HTTPException
|
12 |
+
from fastapi.responses import JSONResponse, PlainTextResponse
|
13 |
from pydantic import BaseModel, Field, ValidationError
|
14 |
|
15 |
from FLUX import generate_image
|
|
|
609 |
def index():
|
610 |
return "Server is running!"
|
611 |
|
612 |
+
@app.api_route("/health", methods=["GET", "HEAD"])
|
613 |
+
def health():
|
614 |
+
# HEAD requests ignore the body by HTTP spec, so FastAPI handles that automatically
|
615 |
+
return JSONResponse(content={"status": "ok"})
|
616 |
+
|
617 |
+
|
618 |
if __name__ == "__main__":
|
619 |
client.send_message_to(
|
620 |
BotConfig.BOT_GROUP_CHAT,
|
requirements.txt
CHANGED
@@ -4,4 +4,5 @@ openai
|
|
4 |
pillow
|
5 |
requests
|
6 |
supabase
|
7 |
-
pydantic
|
|
|
|
4 |
pillow
|
5 |
requests
|
6 |
supabase
|
7 |
+
pydantic
|
8 |
+
python-telegram-bot
|
telebot.py
ADDED
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import threading
|
3 |
+
import requests
|
4 |
+
import logging
|
5 |
+
import queue
|
6 |
+
import json
|
7 |
+
import asyncio
|
8 |
+
from typing import List, Optional, Literal
|
9 |
+
from collections import defaultdict, deque
|
10 |
+
from concurrent.futures import ThreadPoolExecutor
|
11 |
+
|
12 |
+
from telegram import Update, Message, Bot
|
13 |
+
from telegram.ext import (
|
14 |
+
ApplicationBuilder,
|
15 |
+
ContextTypes,
|
16 |
+
CommandHandler,
|
17 |
+
MessageHandler,
|
18 |
+
filters,
|
19 |
+
)
|
20 |
+
from pydantic import BaseModel, Field, ValidationError
|
21 |
+
|
22 |
+
from FLUX import generate_image
|
23 |
+
from VoiceReply import generate_voice_reply
|
24 |
+
from polLLM import generate_llm, LLMBadRequestError
|
25 |
+
|
26 |
+
# --- Logging Setup ---------------------------------------------------------
|
27 |
+
|
28 |
+
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
|
29 |
+
logger = logging.getLogger("eve_bot")
|
30 |
+
logger.setLevel(LOG_LEVEL)
|
31 |
+
|
32 |
+
handler = logging.StreamHandler()
|
33 |
+
formatter = logging.Formatter(
|
34 |
+
"%(asctime)s [%(levelname)s] [%(chat_id)s/%(user_id)s] %(message)s"
|
35 |
+
)
|
36 |
+
handler.setFormatter(formatter)
|
37 |
+
|
38 |
+
class ContextFilter(logging.Filter):
    """Ensure every log record carries ``chat_id`` and ``user_id`` fields.

    The handler's formatter references ``%(chat_id)s/%(user_id)s``; records
    emitted without those extras would otherwise break formatting, so this
    filter backfills a ``"-"`` placeholder and always lets the record pass.
    """

    def filter(self, record):
        for field in ("chat_id", "user_id"):
            if not hasattr(record, field):
                setattr(record, field, "-")
        return True
|
43 |
+
|
44 |
+
handler.addFilter(ContextFilter())
|
45 |
+
logger.handlers = [handler]
|
46 |
+
|
47 |
+
# Thread‐local to carry context through helpers
|
48 |
+
_thread_ctx = threading.local()
|
49 |
+
def set_thread_context(chat_id, user_id, message_id):
    """Stash the current message's identifiers on the thread-local context."""
    _thread_ctx.chat_id, _thread_ctx.user_id, _thread_ctx.message_id = (
        chat_id,
        user_id,
        message_id,
    )
|
53 |
+
|
54 |
+
def get_thread_context():
    """Return ``(chat_id, user_id, message_id)``; each is None when unset."""
    return tuple(
        getattr(_thread_ctx, field, None)
        for field in ("chat_id", "user_id", "message_id")
    )
|
60 |
+
|
61 |
+
# --- Conversation History -------------------------------------------------
|
62 |
+
|
63 |
+
history = defaultdict(lambda: deque(maxlen=20))
|
64 |
+
|
65 |
+
def record_user_message(chat_id, user_id, message):
    """Append a user turn to the rolling per-(chat, user) history deque."""
    history[chat_id, user_id].append(f"User: {message}")
|
67 |
+
|
68 |
+
def record_bot_message(chat_id, user_id, message):
    """Append an assistant turn to the rolling per-(chat, user) history deque."""
    history[chat_id, user_id].append(f"Assistant: {message}")
|
70 |
+
|
71 |
+
def get_history_text(chat_id, user_id):
    """Render the stored conversation as one newline-separated string."""
    return "\n".join(history[chat_id, user_id])
|
73 |
+
|
74 |
+
def clear_history(chat_id, user_id):
    """Drop every stored turn for this (chat, user) pair."""
    history[chat_id, user_id].clear()
|
76 |
+
|
77 |
+
# --- Config ---------------------------------------------------------------
|
78 |
+
|
79 |
+
class BotConfig:
    """Static bot configuration, read from the environment at import time."""

    TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")  # required; enforced by validate()
    IMAGE_DIR = "/tmp/images"    # scratch directory for generated images
    AUDIO_DIR = "/tmp/audio"     # scratch directory for voice replies
    DEFAULT_IMAGE_COUNT = 4      # images per request when no count is given

    @classmethod
    def validate(cls):
        """Fail fast at startup when the Telegram token is missing/empty."""
        if not cls.TELEGRAM_TOKEN:
            raise ValueError("Missing TELEGRAM_TOKEN")
|
89 |
+
|
90 |
+
BotConfig.validate()
|
91 |
+
|
92 |
+
# --- Threading & Queues ---------------------------------------------------
|
93 |
+
|
94 |
+
task_queue = queue.Queue()
|
95 |
+
polls = {}
|
96 |
+
|
97 |
+
def worker():
    """Background consumer loop: drain ``task_queue`` forever.

    Only ``"image"`` tasks exist today. Any exception is logged and the
    thread keeps running; ``task_done()`` is always called so queue joins
    cannot deadlock.
    """
    while True:
        job = task_queue.get()
        try:
            if job["type"] == "image":
                _fn_generate_images(**job)
        except Exception as exc:
            logger.error(f"Worker error {job}: {exc}")
        finally:
            task_queue.task_done()
|
107 |
+
|
108 |
+
for _ in range(4):
|
109 |
+
threading.Thread(target=worker, daemon=True).start()
|
110 |
+
|
111 |
+
# --- Core Handlers --------------------------------------------------------
|
112 |
+
|
113 |
+
async def _fn_send_text(mid: int, cid: int, text: str, context: ContextTypes.DEFAULT_TYPE):
|
114 |
+
chat_id, user_id, _ = get_thread_context()
|
115 |
+
msg: Message = await context.bot.send_message(
|
116 |
+
chat_id=cid,
|
117 |
+
text=text,
|
118 |
+
reply_to_message_id=mid
|
119 |
+
)
|
120 |
+
record_bot_message(chat_id, user_id, text)
|
121 |
+
# enqueue audio reply in async loop
|
122 |
+
context.application.create_task(_fn_voice_reply_async(
|
123 |
+
msg.message_id, cid, text, context
|
124 |
+
))
|
125 |
+
|
126 |
+
async def _fn_send_text_wrapper(mid, cid, message, context):
|
127 |
+
await _fn_send_text(mid, cid, message, context)
|
128 |
+
|
129 |
+
async def _fn_voice_reply_async(mid: int, cid: int, prompt: str, context: ContextTypes.DEFAULT_TYPE):
|
130 |
+
proc = (
|
131 |
+
f"Just say this exactly as written in a friendly, playful, "
|
132 |
+
f"happy and helpful but a little bit clumsy-cute way: {prompt}"
|
133 |
+
)
|
134 |
+
res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
|
135 |
+
if res and res[0]:
|
136 |
+
path, _ = res
|
137 |
+
with open(path, "rb") as f:
|
138 |
+
await context.bot.send_audio(chat_id=cid, audio=f, reply_to_message_id=mid)
|
139 |
+
os.remove(path)
|
140 |
+
else:
|
141 |
+
await _fn_send_text(mid, cid, prompt, context)
|
142 |
+
|
143 |
+
async def _fn_summarize(mid, cid, text, context):
|
144 |
+
summary = generate_llm(f"Summarize:\n\n{text}")
|
145 |
+
await _fn_send_text(mid, cid, summary, context)
|
146 |
+
|
147 |
+
async def _fn_translate(mid, cid, lang, text, context):
|
148 |
+
resp = generate_llm(f"Translate to {lang}:\n\n{text}")
|
149 |
+
await _fn_send_text(mid, cid, resp, context)
|
150 |
+
|
151 |
+
async def _fn_joke(mid, cid, context):
    """Send a random joke, replying to message *mid* in chat *cid*.

    Tries the public joke API first; on any realistic failure it falls back
    to asking the LLM for a joke, so the user always gets a reply.
    """
    try:
        j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
        joke = f"{j['setup']}\n\n{j['punchline']}"
    except (requests.RequestException, ValueError, KeyError):
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Catch only the realistic failures: network
        # errors, non-JSON responses, and a payload missing setup/punchline.
        joke = generate_llm("Tell me a short joke.")
    await _fn_send_text(mid, cid, joke, context)
|
158 |
+
|
159 |
+
async def _fn_weather(mid, cid, loc, context):
|
160 |
+
raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
|
161 |
+
report = generate_llm(f"Give a weather report in °C:\n\n{raw}")
|
162 |
+
await _fn_send_text(mid, cid, report, context)
|
163 |
+
|
164 |
+
async def _fn_inspire(mid, cid, context):
|
165 |
+
quote = generate_llm("Give me a unique, random short inspirational quote.")
|
166 |
+
await _fn_send_text(mid, cid, f"✨ {quote}", context)
|
167 |
+
|
168 |
+
async def _fn_meme(mid, cid, txt, context):
|
169 |
+
await context.bot.send_message(chat_id=cid, text="🎨 Generating meme…", reply_to_message_id=mid)
|
170 |
+
task_queue.put({"type":"image","message_id":mid,"chat_id":cid,"prompt":f"meme: {txt}"})
|
171 |
+
|
172 |
+
async def _fn_poll_create(mid, cid, question, options, context):
|
173 |
+
votes = {i+1:0 for i in range(len(options))}
|
174 |
+
polls[cid] = {"question": question, "options": options, "votes": votes, "voters": {}}
|
175 |
+
text = f"📊 *Poll:* {question}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(options))
|
176 |
+
await context.bot.send_message(chat_id=cid, text=text, reply_to_message_id=mid, parse_mode="Markdown")
|
177 |
+
record_bot_message(cid, get_thread_context()[1], text)
|
178 |
+
|
179 |
+
async def _fn_poll_vote(mid, cid, voter, choice, context):
|
180 |
+
poll = polls.get(cid)
|
181 |
+
if not poll or choice<1 or choice>len(poll["options"]): return
|
182 |
+
prev = poll["voters"].get(voter)
|
183 |
+
if prev: poll["votes"][prev] -= 1
|
184 |
+
poll["votes"][choice] += 1
|
185 |
+
poll["voters"][voter] = choice
|
186 |
+
await _fn_send_text(mid, cid, f"✅ Voted for {poll['options'][choice-1]}", context)
|
187 |
+
|
188 |
+
async def _fn_poll_results(mid, cid, context):
|
189 |
+
poll = polls.get(cid)
|
190 |
+
if not poll:
|
191 |
+
await _fn_send_text(mid, cid, "No active poll.", context)
|
192 |
+
return
|
193 |
+
txt = f"📊 *Results:* {poll['question']}\n" + "\n".join(
|
194 |
+
f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
|
195 |
+
)
|
196 |
+
await _fn_send_text(mid, cid, txt, context)
|
197 |
+
|
198 |
+
async def _fn_poll_end(mid, cid, context):
|
199 |
+
poll = polls.pop(cid, None)
|
200 |
+
if not poll:
|
201 |
+
await _fn_send_text(mid, cid, "No active poll.", context)
|
202 |
+
return
|
203 |
+
txt = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
|
204 |
+
f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
|
205 |
+
)
|
206 |
+
await _fn_send_text(mid, cid, txt, context)
|
207 |
+
|
208 |
+
def _fn_generate_images(message_id, chat_id, prompt, count=1, width=None, height=None, **_):
    """
    Runs in a background thread. Spins up its own asyncio loop
    so we can `await` the bot’s send_message / send_photo coroutines.

    Accepts the whole queued task dict via **_ so extra keys (e.g. "type")
    are silently ignored.
    """
    # Fresh Bot per task: this worker thread has no access to the
    # application's bot instance or its running event loop.
    b = Bot(BotConfig.TELEGRAM_TOKEN)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        # Announce up-front how many images are coming.
        loop.run_until_complete(
            b.send_message(
                chat_id=chat_id,
                text=f"✨ Generating {count} image(s)…",
                reply_to_message_id=message_id
            )
        )
        for i in range(1, count+1):
            try:
                # NOTE(review): message_id is reused for both id-like args of
                # generate_image — presumably request/file identifiers; confirm
                # against FLUX.generate_image's signature.
                img, path, ret_p, url = generate_image(
                    prompt, str(message_id), str(message_id),
                    BotConfig.IMAGE_DIR, width=width, height=height
                )
                caption = f"✨ Image {i}/{count}: {url}\n\n{ret_p}"
                with open(path, "rb") as f:
                    loop.run_until_complete(
                        b.send_photo(
                            chat_id=chat_id,
                            photo=f,
                            caption=caption,
                            reply_to_message_id=message_id
                        )
                    )
                # Delete the temp file only after a successful upload.
                os.remove(path)
            except Exception as e:
                # One failed image does not abort the batch; tell the user
                # and continue with the next index.
                logger.warning(f"Img {i}/{count} failed: {e}")
                loop.run_until_complete(
                    b.send_message(
                        chat_id=chat_id,
                        text=f"😢 Failed to generate image {i}/{count}.",
                        reply_to_message_id=message_id
                    )
                )
    finally:
        # Always tear down the private loop, even if the initial
        # announcement message raised.
        loop.close()
|
252 |
+
|
253 |
+
# --- Pydantic Models & Intent Routing ------------------------------------
|
254 |
+
|
255 |
+
class BaseIntent(BaseModel):
|
256 |
+
action: str
|
257 |
+
|
258 |
+
class SummarizeIntent(BaseIntent):
|
259 |
+
action: Literal["summarize"]
|
260 |
+
text: str
|
261 |
+
|
262 |
+
class TranslateIntent(BaseIntent):
|
263 |
+
action: Literal["translate"]
|
264 |
+
lang: str
|
265 |
+
text: str
|
266 |
+
|
267 |
+
class JokeIntent(BaseIntent):
|
268 |
+
action: Literal["joke"]
|
269 |
+
|
270 |
+
class WeatherIntent(BaseIntent):
|
271 |
+
action: Literal["weather"]
|
272 |
+
location: str
|
273 |
+
|
274 |
+
class InspireIntent(BaseIntent):
|
275 |
+
action: Literal["inspire"]
|
276 |
+
|
277 |
+
class MemeIntent(BaseIntent):
|
278 |
+
action: Literal["meme"]
|
279 |
+
text: str
|
280 |
+
|
281 |
+
class PollCreateIntent(BaseIntent):
|
282 |
+
action: Literal["poll_create"]
|
283 |
+
question: str
|
284 |
+
options: List[str]
|
285 |
+
|
286 |
+
class PollVoteIntent(BaseIntent):
|
287 |
+
action: Literal["poll_vote"]
|
288 |
+
voter: str
|
289 |
+
choice: int
|
290 |
+
|
291 |
+
class PollResultsIntent(BaseIntent):
|
292 |
+
action: Literal["poll_results"]
|
293 |
+
|
294 |
+
class PollEndIntent(BaseIntent):
|
295 |
+
action: Literal["poll_end"]
|
296 |
+
|
297 |
+
class GenerateImageIntent(BaseModel):
    """Intent asking the bot to queue image generation.

    NOTE(review): unlike the other intents this subclasses BaseModel directly
    instead of BaseIntent — kept as-is since validation only needs `action`.
    """
    action: Literal["generate_image"]
    prompt: str
    count: int = Field(default=1, ge=1)
    # Bug fix: `Optional[int]` with no default is a *required* field in
    # pydantic v2, so any LLM payload omitting width/height failed
    # model_validate and fell through to the plain-text fallback.
    width: Optional[int] = None
    height: Optional[int] = None
|
303 |
+
|
304 |
+
class SendTextIntent(BaseIntent):
|
305 |
+
action: Literal["send_text"]
|
306 |
+
message: str
|
307 |
+
|
308 |
+
INTENT_MODELS = [
|
309 |
+
SummarizeIntent, TranslateIntent, JokeIntent, WeatherIntent,
|
310 |
+
InspireIntent, MemeIntent, PollCreateIntent, PollVoteIntent,
|
311 |
+
PollResultsIntent, PollEndIntent, GenerateImageIntent, SendTextIntent
|
312 |
+
]
|
313 |
+
|
314 |
+
async def _fn_enqueue_image(mid, cid, prompt, count, width, height, context):
|
315 |
+
task_queue.put({
|
316 |
+
"type":"image",
|
317 |
+
"message_id": mid,
|
318 |
+
"chat_id": cid,
|
319 |
+
"prompt": prompt,
|
320 |
+
"count": count,
|
321 |
+
"width": width,
|
322 |
+
"height": height
|
323 |
+
})
|
324 |
+
|
325 |
+
ACTION_HANDLERS = {
|
326 |
+
"summarize": _fn_summarize,
|
327 |
+
"translate": _fn_translate,
|
328 |
+
"joke": _fn_joke,
|
329 |
+
"weather": _fn_weather,
|
330 |
+
"inspire": _fn_inspire,
|
331 |
+
"meme": _fn_meme,
|
332 |
+
"poll_create": _fn_poll_create,
|
333 |
+
"poll_vote": _fn_poll_vote,
|
334 |
+
"poll_results": _fn_poll_results,
|
335 |
+
"poll_end": _fn_poll_end,
|
336 |
+
"generate_image": _fn_enqueue_image,
|
337 |
+
"send_text": _fn_send_text_wrapper,
|
338 |
+
}
|
339 |
+
|
340 |
+
def route_intent(user_input: str, chat_id: str, sender: str):
    """Ask the LLM to map *user_input* to one intent model.

    Returns a validated intent instance; on any failure along the way it
    degrades to a SendTextIntent so the caller always gets something
    dispatchable. Resolution order:
      1. LLM call (bad request clears history and apologizes),
      2. JSON parse (non-JSON output is treated as plain chat),
      3. strict validation against every INTENT_MODELS entry,
      4. lenient per-action kwargs reconstruction,
      5. plain-text fallback.
    """
    history_text = get_history_text(chat_id, sender)
    # System prompt doubles as the function "schema": the model must emit one
    # raw JSON object matching one of the listed shapes, or plain text.
    sys_prompt = (
        "You never perform work yourself—you only invoke one of the available functions. "
        "When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
        "Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
        "If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
        "- summarize(text)\n"
        "- translate(lang, text)\n"
        "- joke()\n"
        "- weather(location)\n"
        "- inspire()\n"
        "- meme(text)\n"
        "- poll_create(question, options)\n"
        "- poll_vote(voter, choice)\n"
        "- poll_results()\n"
        "- poll_end()\n"
        "- generate_image(prompt, count, width, height)\n"
        "- send_text(message)\n\n"
        "Return only raw JSON matching one of these shapes. For example:\n"
        " {\"action\":\"generate_image\",\"prompt\":\"a red fox\",\"count\":3,\"width\":512,\"height\":512}\n"
        "Otherwise, use send_text to reply with plain chat and you should only return one json for the current message not for previous conversations.\n"
        f"Conversation so far:\n{history_text}\n\n current message: User: {user_input}"
    )
    try:
        raw = generate_llm(sys_prompt)
    except LLMBadRequestError:
        # The accumulated history itself likely triggered the rejection:
        # wipe it and restart the conversation.
        clear_history(chat_id, sender)
        return SendTextIntent(action="send_text", message="Oops, let’s start fresh!")
    try:
        parsed = json.loads(raw)
    except json.JSONDecodeError:
        # Not JSON: the model chose to answer in plain prose; relay verbatim.
        return SendTextIntent(action="send_text", message=raw)

    # Strict pass: first model that validates wins.
    for M in INTENT_MODELS:
        try:
            return M.model_validate(parsed)
        except ValidationError:
            continue

    # Lenient pass: the JSON named a known action but failed strict
    # validation — rebuild the kwargs field-by-field with defaults.
    action = parsed.get("action")
    if action in ACTION_HANDLERS:
        data = parsed
        kwargs = {}
        if action == "generate_image":
            kwargs = {
                "prompt": data.get("prompt",""),
                "count": int(data.get("count", BotConfig.DEFAULT_IMAGE_COUNT)),
                "width": data.get("width"),
                "height": data.get("height"),
            }
        elif action == "send_text":
            kwargs = {"message": data.get("message","")}
        elif action == "translate":
            kwargs = {"lang": data.get("lang",""), "text": data.get("text","")}
        elif action == "summarize":
            kwargs = {"text": data.get("text","")}
        elif action == "weather":
            kwargs = {"location": data.get("location","")}
        elif action == "meme":
            kwargs = {"text": data.get("text","")}
        elif action == "poll_create":
            kwargs = {"question": data.get("question",""), "options": data.get("options",[])}
        elif action == "poll_vote":
            # The authenticated sender always overrides any voter the LLM
            # invented.
            kwargs = {"voter": sender, "choice": int(data.get("choice",0))}
        try:
            # NOTE(review): `__fields__` is the pydantic v1-style accessor;
            # it still works under v2 but is deprecated — confirm and
            # consider `model_fields`. Any lookup/validation error here
            # (including `.get("action")` returning None) is caught below.
            model = next(m for m in INTENT_MODELS if getattr(m, "__fields__", {}).get("action").default == action)
            return model.model_validate({"action":action, **kwargs})
        except Exception:
            return SendTextIntent(action="send_text", message=raw)

    # Unknown action: surface the raw model output as chat.
    return SendTextIntent(action="send_text", message=raw)
|
412 |
+
|
413 |
+
# --- Telegram Handlers ----------------------------------------------------
|
414 |
+
|
415 |
+
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Greet the user in response to /start."""
    greeting = "🌟 Eve is online! Type /help to see commands."
    await update.message.reply_text(greeting)
|
417 |
+
|
418 |
+
async def help_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Reply with the Markdown-formatted command reference."""
    usage = (
        "🤖 *Eve* commands:\n"
        "• /help\n"
        "• /summarize <text>\n"
        "• /translate <lang>|<text>\n"
        "• /joke\n"
        "• /weather <loc>\n"
        "• /inspire\n"
        "• /meme <text>\n"
        "• /poll <Q>|opt1|opt2 …\n"
        "• /results\n"
        "• /endpoll\n"
        "• /gen <prompt>|<count>|<width>|<height>\n"
        "Otherwise just chat with me."
    )
    await update.message.reply_markdown(usage)
|
434 |
+
|
435 |
+
async def message_router(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Top-level text handler: slash commands first, then LLM intent routing.

    Every incoming message is recorded into history before dispatch; any
    recognized /command short-circuits with an explicit handler call, and
    everything else is routed through route_intent().
    """
    msg: Message = update.message
    chat_id = msg.chat.id
    user_id = msg.from_user.id
    mid = msg.message_id
    text = msg.text or ""

    # Make the ids available to helpers running on this thread, and log the
    # user turn before any handler can reply.
    set_thread_context(chat_id, user_id, mid)
    record_user_message(chat_id, user_id, text)

    # Matching is done on the lowercased text; slices below are taken from
    # the original `text` to preserve argument casing.
    low = text.lower().strip()
    if low == "/help":
        return await help_cmd(update, context)
    if low.startswith("/summarize "):
        # 11 == len("/summarize ")
        return await _fn_summarize(mid, chat_id, text[11:].strip(), context)
    if low.startswith("/translate "):
        # Format: /translate <lang>|<text> — split only on the first pipe.
        lang, txt = text[11:].split("|",1)
        return await _fn_translate(mid, chat_id, lang.strip(), txt.strip(), context)
    if low == "/joke":
        return await _fn_joke(mid, chat_id, context)
    if low.startswith("/weather "):
        # wttr.in wants '+' instead of spaces in the location.
        return await _fn_weather(mid, chat_id, text[9:].strip().replace(" ","+"), context)
    if low == "/inspire":
        return await _fn_inspire(mid, chat_id, context)
    if low.startswith("/meme "):
        return await _fn_meme(mid, chat_id, text[6:].strip(), context)
    if low.startswith("/poll "):
        # Format: /poll <question>|opt1|opt2|… — first part is the question.
        parts = [p.strip() for p in text[6:].split("|")]
        return await _fn_poll_create(mid, chat_id, parts[0], parts[1:], context)
    if chat_id in polls and low.isdigit():
        # A bare number while a poll is open counts as a vote.
        return await _fn_poll_vote(mid, chat_id, str(user_id), int(low), context)
    if low == "/results":
        return await _fn_poll_results(mid, chat_id, context)
    if low == "/endpoll":
        return await _fn_poll_end(mid, chat_id, context)
    if low.startswith("/gen"):
        # Format: /gen <prompt>|<count>|<width>|<height>; count/width/height
        # fall back to defaults when missing or non-numeric.
        parts = text[4:].split("|")
        pr = parts[0].strip()
        ct = int(parts[1]) if len(parts)>1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
        width = int(parts[2]) if len(parts)>2 and parts[2].isdigit() else None
        height = int(parts[3]) if len(parts)>3 and parts[3].isdigit() else None
        # Image work is heavy: hand it to the background worker threads.
        task_queue.put({
            "type":"image","message_id":mid,"chat_id":chat_id,
            "prompt":pr,"count":ct,"width":width,"height":height
        })
        return

    # No command matched: let the LLM pick an intent and dispatch it.
    # NOTE(review): if route_intent ever returned an action missing from
    # ACTION_HANDLERS, handler would be None and this await would raise —
    # currently every intent's action has a handler entry.
    intent = route_intent(text, str(chat_id), str(user_id))
    handler = ACTION_HANDLERS.get(intent.action)
    kwargs = intent.model_dump(exclude={"action"})
    await handler(mid, chat_id, **kwargs, context=context)
|
486 |
+
|
487 |
+
def main():
    """Build the Telegram application, register handlers, and block on polling."""
    application = ApplicationBuilder().token(BotConfig.TELEGRAM_TOKEN).build()
    for command, callback in (("start", start), ("help", help_cmd)):
        application.add_handler(CommandHandler(command, callback))
    # Route every non-command text message through the LLM/command router.
    application.add_handler(
        MessageHandler(filters.TEXT & ~filters.COMMAND, message_router)
    )
    logger.info("Starting Telegram bot…")
    application.run_polling()
|
494 |
+
|
495 |
+
if __name__ == "__main__":
|
496 |
+
main()
|