Chandima Prabhath committed on
Commit
ff069bf
·
1 Parent(s): 07534df

Enhance app functionality with inactivity monitoring, startup messaging, and improved command responses; update configuration for text and voice replies.
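The inactivity monitoring mentioned here is implemented in app.py as a daemon thread that compares a shared timestamp, updated by the webhook, against a five-minute threshold. A minimal standalone sketch of that pattern, where notify_status_chat is a hypothetical stand-in for the commit's send_message helper:

import threading
import time

def notify_status_chat(text):
    # Hypothetical stand-in for app.py's send_message(); here it only logs.
    print(f"[status] {text}")

last_message_time = time.time()  # app.py refreshes this on every incoming webhook message

def inactivity_monitor(threshold=300, poll_interval=60):
    # Background loop: if nothing arrived for `threshold` seconds, send one reminder.
    global last_message_time
    while True:
        time.sleep(poll_interval)
        if time.time() - last_message_time >= threshold:
            notify_status_chat("No messages for a while; still online if you need anything.")
            last_message_time = time.time()  # reset so reminders are not repeated every cycle

threading.Thread(target=inactivity_monitor, daemon=True).start()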

Files changed (4)
  1. __pycache__/utils.cpython-313.pyc +0 -0
  2. app.py +88 -72
  3. config.yaml +2 -2
  4. polLLM.py +37 -0
__pycache__/utils.cpython-313.pyc ADDED
Binary file (535 Bytes).
 
app.py CHANGED
@@ -5,23 +5,25 @@ import logging
5
  import queue
6
  import re
7
  import json
 
8
  from fastapi import FastAPI, Request, HTTPException
9
  from fastapi.responses import PlainTextResponse, JSONResponse
10
  from FLUX import generate_image
11
  from VoiceReply import generate_voice_reply
12
- from llm import generate_llm
13
 
14
  # Configure logging
15
  logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")
16
 
17
  # Env vars
18
- GREEN_API_URL = os.getenv("GREEN_API_URL")
19
- GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
20
- GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
21
- GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
22
- WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
23
- image_dir = "/tmp/images"
24
- audio_dir = "/tmp/audio"
 
25
 
26
  if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
27
  raise ValueError("Environment variables are not set properly")
@@ -33,7 +35,33 @@ polls = {} # chat_id → {"question":…, "options":[…], "votes":{1:0
33
 
34
  app = FastAPI()
35
 
36
- # Background worker
37
  def worker():
38
  while True:
39
  task = task_queue.get()
@@ -55,7 +83,7 @@ threading.Thread(target=worker, daemon=True).start()
55
  # --- send helpers ---
56
  def send_message(message_id, to_number, message, retries=3):
57
  chat_id = to_number if to_number.endswith("@g.us") else to_number
58
- url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
59
  payload = {"chatId": chat_id, "message": message, "quotedMessageId": message_id}
60
  for i in range(retries):
61
  try:
@@ -63,21 +91,21 @@ def send_message(message_id, to_number, message, retries=3):
63
  r.raise_for_status()
64
  return r.json()
65
  except requests.RequestException as e:
66
- if i == retries-1:
67
  return {"error": str(e)}
68
 
69
- def send_image(message_id, to_number, image_path, caption = "Here you go!", retries=3):
70
  chat_id = to_number if to_number.endswith("@g.us") else to_number
71
- url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
72
  payload = {"chatId": chat_id, "caption": caption, "quotedMessageId": message_id}
73
- files = [("file", ("image.jpg", open(image_path, "rb"), "image/jpeg"))]
74
  for i in range(retries):
75
  try:
76
  r = requests.post(url, data=payload, files=files)
77
  r.raise_for_status()
78
  return r.json()
79
  except requests.RequestException as e:
80
- if i == retries-1:
81
  return {"error": str(e)}
82
 
83
  def send_audio(message_id, to_number, audio_path, retries=3):
@@ -85,7 +113,7 @@ def send_audio(message_id, to_number, audio_path, retries=3):
85
  chat_id = to_number if to_number.endswith("@g.us") else to_number
86
  if not os.path.exists(audio_path):
87
  logging.debug(f"Missing audio: {audio_path}")
88
- url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
89
  payload = {"chatId": chat_id, "caption": "Here is your voice reply!", "quotedMessageId": message_id}
90
  try:
91
  with open(audio_path, "rb") as f:
@@ -96,17 +124,17 @@ def send_audio(message_id, to_number, audio_path, retries=3):
96
  r.raise_for_status()
97
  return r.json()
98
  except requests.RequestException as e:
99
- if i == retries-1:
100
  return {"error": str(e)}
101
  except Exception as e:
102
  return {"error": str(e)}
103
 
104
- # --- core response fns ---
105
  def response_text(message_id, chat_id, prompt):
106
  try:
107
  msg = generate_llm(prompt)
108
  send_message(message_id, chat_id, msg)
109
- except:
110
  send_message(message_id, chat_id, "Error processing your request.")
111
 
112
  def response_audio(message_id, chat_id, prompt):
@@ -128,7 +156,7 @@ def handle_image_generation(message_id, chat_id, prompt):
128
  try:
129
  img, path, ret_prompt, url = generate_image(prompt, message_id, message_id, image_dir)
130
  if img:
131
- # Split the ret_prompt into paragraphs and wrap each in underscores for italics.
132
  formatted_ret_prompt = "\n\n".join(
133
  f"_{paragraph.strip()}_" for paragraph in ret_prompt.split("\n\n") if paragraph.strip()
134
  )
@@ -144,10 +172,13 @@ def handle_image_generation(message_id, chat_id, prompt):
144
  logging.error("Error in handle_image_generation: %s", e)
145
  send_message(message_id, chat_id, "Error generating image.")
146
 
147
- # --- webhook ---
148
  @app.post("/whatsapp")
149
  async def whatsapp_webhook(request: Request):
150
- # auth & parse
151
  auth = request.headers.get("Authorization", "").strip()
152
  if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
153
  raise HTTPException(403, "Unauthorized")
@@ -159,77 +190,70 @@ async def whatsapp_webhook(request: Request):
159
  return {"success": True}
160
 
161
  logging.debug("recv: %s", data)
162
- sd = data["senderData"]
163
  chat = sd["chatId"]
164
- mid = data["idMessage"]
165
  sender_jid = sd.get("sender")
166
 
167
  md = data.get("messageData", {})
168
- # drop any WhatsApp native quoted-message event
169
  if md.get("typeMessage") == "quotedMessage" or "quotedMessage" in md:
170
  logging.debug("skip native quotedMessage")
171
  return {"success": True}
172
 
173
- # extract text + contextInfo
174
  if "textMessageData" in md:
175
- body = md["textMessageData"].get("textMessage","").strip()
176
- ctx = md["textMessageData"].get("contextInfo",{})
177
  elif "extendedTextMessageData" in md:
178
- body = md["extendedTextMessageData"].get("text","").strip()
179
- ctx = md["extendedTextMessageData"].get("contextInfo",{})
180
  else:
181
  return {"success": True}
182
 
183
- # ignore native mentions & plain @123
184
  if ctx.get("mentionedJid") or ctx.get("mentionedJidList"):
185
  return {"success": True}
186
  if chat.endswith("@g.us") and re.search(r"@\d+", body):
187
  return {"success": True}
188
 
189
- # ——— NEW COMMANDS ———
190
  low = body.lower()
191
 
192
- # HELP
193
  if low == "/help":
194
  help_text = (
195
- "🤖 *Commands*: \n"
196
- "/help\n"
197
- "/summarize <text>\n"
198
- "/translate <lang>|<text>\n"
199
- "/joke\n"
200
- "/weather <location>\n"
201
- "/inspire\n"
202
- "/trivia → new trivia\n"
203
- "/answer → reveal answer\n"
204
- "/meme <text>\n"
205
- "/poll <Q>|<opt1>|<opt2>|…\n"
206
- "/results\n"
207
- "/endpoll\n"
208
- "/imagine <prompt>\n"
209
- "Or just send any text and I'll reply by voice!"
210
  )
211
  send_message(mid, chat, help_text)
212
  return {"success": True}
213
 
214
- # SUMMARIZE
215
  if low.startswith("/summarize "):
216
  txt = body[len("/summarize "):].strip()
217
  summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{txt}")
218
  send_message(mid, chat, summary)
219
  return {"success": True}
220
 
221
- # TRANSLATE
222
  if low.startswith("/translate "):
223
  part = body[len("/translate "):]
224
  if "|" not in part:
225
- send_message(mid, chat, "Use `/translate Language|Text`")
226
  else:
227
- lang, txt = part.split("|",1)
228
  resp = generate_llm(f"Translate the following into {lang.strip()}:\n\n{txt.strip()}")
229
  send_message(mid, chat, resp)
230
  return {"success": True}
231
 
232
- # JOKE
233
  if low == "/joke":
234
  try:
235
  joke = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
@@ -238,7 +262,6 @@ async def whatsapp_webhook(request: Request):
238
  send_message(mid, chat, generate_llm("Tell me a short, funny joke."))
239
  return {"success": True}
240
 
241
- # WEATHER
242
  if low.startswith("/weather "):
243
  loc = body[len("/weather "):].strip().replace(" ", "+")
244
  try:
@@ -248,13 +271,11 @@ async def whatsapp_webhook(request: Request):
248
  send_message(mid, chat, "Could not fetch weather.")
249
  return {"success": True}
250
 
251
- # INSPIRE
252
  if low == "/inspire":
253
  quote = generate_llm("Give me a short inspirational quote.")
254
send_message(mid, chat, f"✨ {quote}")
255
  return {"success": True}
256
 
257
- # TRIVIA
258
  if low == "/trivia":
259
  raw = generate_llm(
260
  "Generate a trivia question and answer in JSON: "
@@ -263,21 +284,19 @@ async def whatsapp_webhook(request: Request):
263
  try:
264
  obj = json.loads(raw)
265
  trivia_store[chat] = obj
266
- send_message(mid, chat, f"❓ {obj['question']}\nReply `/answer` to see the answer.")
267
  except:
268
  send_message(mid, chat, "Failed to generate trivia.")
269
  return {"success": True}
270
 
271
- # ANSWER
272
  if low == "/answer":
273
  if chat in trivia_store:
274
  ans = trivia_store.pop(chat)["answer"]
275
send_message(mid, chat, f"💡 Answer: {ans}")
276
  else:
277
- send_message(mid, chat, "No active trivia. Send `/trivia`.")
278
  return {"success": True}
279
 
280
- # MEME
281
  if low.startswith("/meme "):
282
  txt = body[len("/meme "):].strip()
283
send_message(mid, chat, "🎨 Generating your meme...")
@@ -289,23 +308,21 @@ async def whatsapp_webhook(request: Request):
289
  })
290
  return {"success": True}
291
 
292
- # POLL
293
  if low.startswith("/poll "):
294
  parts = body[len("/poll "):].split("|")
295
  if len(parts) < 3:
296
- send_message(mid, chat, "Use `/poll Question|Option1|Option2[...]`")
297
  else:
298
  q = parts[0].strip()
299
  opts = [p.strip() for p in parts[1:]]
300
  votes = {i+1: 0 for i in range(len(opts))}
301
  polls[chat] = {"question": q, "options": opts, "votes": votes, "voters": {}}
302
txt = f"📊 *Poll:* {q}\n" + "\n".join(
303
- f"{i+1}. {opt}" for i,opt in enumerate(opts)
304
  ) + "\n\nReply with the *option number* to vote."
305
  send_message(mid, chat, txt)
306
  return {"success": True}
307
 
308
- # VOTE in poll
309
  if chat in polls and body.isdigit():
310
  n = int(body)
311
  p = polls[chat]
@@ -318,37 +335,34 @@ async def whatsapp_webhook(request: Request):
318
send_message(mid, chat, f"✅ Vote recorded: {p['options'][n-1]}")
319
  return {"success": True}
320
 
321
- # POLL RESULTS
322
  if low == "/results":
323
  if chat in polls:
324
  p = polls[chat]
325
txt = f"📊 *Results:* {p['question']}\n" + "\n".join(
326
- f"{i}. {opt}: {p['votes'][i]}" for i,opt in enumerate([""]+p["options"]) if i>0
327
  )
328
  send_message(mid, chat, txt)
329
  else:
330
  send_message(mid, chat, "No active poll.")
331
  return {"success": True}
332
 
333
- # END POLL
334
  if low == "/endpoll":
335
  if chat in polls:
336
  p = polls.pop(chat)
337
txt = f"📊 *Final Results:* {p['question']}\n" + "\n".join(
338
- f"{i}. {opt}: {p['votes'][i]}" for i,opt in enumerate([""]+p["options"]) if i>0
339
  )
340
  send_message(mid, chat, txt)
341
  else:
342
  send_message(mid, chat, "No active poll.")
343
  return {"success": True}
344
 
345
- # IMAGINE (existing)
346
  if low.startswith("/imagine"):
347
  prompt = body[len("/imagine"):].strip()
348
  if not prompt:
349
- send_message(mid, chat, "Use `/imagine <prompt>`")
350
  else:
351
- send_message(mid, chat, "✨ Your image is being generated. \nPlease wait...")
352
  task_queue.put({
353
  "type": "image",
354
  "message_id": mid,
@@ -357,7 +371,7 @@ async def whatsapp_webhook(request: Request):
357
  })
358
  return {"success": True}
359
 
360
- # fallback → voice reply
361
  task_queue.put({
362
  "type": "audio",
363
  "message_id": mid,
@@ -371,5 +385,7 @@ def index():
371
  return "Server is running!"
372
 
373
if __name__ == "__main__":
374
  import uvicorn
375
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
5
  import queue
6
  import re
7
  import json
8
+ import time
9
  from fastapi import FastAPI, Request, HTTPException
10
  from fastapi.responses import PlainTextResponse, JSONResponse
11
  from FLUX import generate_image
12
  from VoiceReply import generate_voice_reply
13
+ from polLLM import generate_llm
14
 
15
  # Configure logging
16
  logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")
17
 
18
  # Env vars
19
+ GREEN_API_URL = os.getenv("GREEN_API_URL")
20
+ GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
21
+ GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
22
+ GREEN_API_ID_INSTANCE= os.getenv("GREEN_API_ID_INSTANCE")
23
+ WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
24
+ BOT_STATUS_CHAT = "[email protected]" # Chat ID for system messages
25
+ image_dir = "/tmp/images"
26
+ audio_dir = "/tmp/audio"
27
 
28
  if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
29
  raise ValueError("Environment variables are not set properly")
 
35
 
36
  app = FastAPI()
37
 
38
+ # Global inactivity tracker
39
+ last_message_time = time.time()
40
+
41
+ # --- Startup Announcement ---
42
+ def send_startup_message():
43
+ if BOT_STATUS_CHAT:
44
+ msg = "🌟 Hi! I'm Eve, your friendly AI assistant. I'm now live and ready to help you with images, voice replies, and more!"
45
+ # Use a dummy message_id "startup"
46
+ send_message("startup", BOT_STATUS_CHAT, msg)
47
+ else:
48
+ logging.warning("BOT_STATUS_CHAT is not set; startup message not sent.")
49
+
50
+ # --- Inactivity Monitor ---
51
+ def inactivity_monitor():
52
+ global last_message_time
53
+ while True:
54
+ time.sleep(60) # check every minute
55
+ if time.time() - last_message_time >= 300: # 5 minutes inactivity
56
+ if BOT_STATUS_CHAT:
57
+ reminder = "⏰ I haven't heard from you in a while! I'm still here if you need anything."
58
+ send_message("inactivity", BOT_STATUS_CHAT, reminder)
59
+ # Reset the timer so we don't spam reminders
60
+ last_message_time = time.time()
61
+
62
+ threading.Thread(target=inactivity_monitor, daemon=True).start()
63
+
64
+ # --- Background Worker ---
65
  def worker():
66
  while True:
67
  task = task_queue.get()
 
83
  # --- send helpers ---
84
  def send_message(message_id, to_number, message, retries=3):
85
  chat_id = to_number if to_number.endswith("@g.us") else to_number
86
+ url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
87
  payload = {"chatId": chat_id, "message": message, "quotedMessageId": message_id}
88
  for i in range(retries):
89
  try:
 
91
  r.raise_for_status()
92
  return r.json()
93
  except requests.RequestException as e:
94
+ if i == retries - 1:
95
  return {"error": str(e)}
96
 
97
+ def send_image(message_id, to_number, image_path, caption="Here you go!", retries=3):
98
  chat_id = to_number if to_number.endswith("@g.us") else to_number
99
+ url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
100
  payload = {"chatId": chat_id, "caption": caption, "quotedMessageId": message_id}
101
+ files = [("file", ("image.jpg", open(image_path, "rb"), "image/jpeg"))]
102
  for i in range(retries):
103
  try:
104
  r = requests.post(url, data=payload, files=files)
105
  r.raise_for_status()
106
  return r.json()
107
  except requests.RequestException as e:
108
+ if i == retries - 1:
109
  return {"error": str(e)}
110
 
111
  def send_audio(message_id, to_number, audio_path, retries=3):
 
113
  chat_id = to_number if to_number.endswith("@g.us") else to_number
114
  if not os.path.exists(audio_path):
115
  logging.debug(f"Missing audio: {audio_path}")
116
+ url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
117
  payload = {"chatId": chat_id, "caption": "Here is your voice reply!", "quotedMessageId": message_id}
118
  try:
119
  with open(audio_path, "rb") as f:
 
124
  r.raise_for_status()
125
  return r.json()
126
  except requests.RequestException as e:
127
+ if i == retries - 1:
128
  return {"error": str(e)}
129
  except Exception as e:
130
  return {"error": str(e)}
131
 
132
+ # --- core response functions ---
133
  def response_text(message_id, chat_id, prompt):
134
  try:
135
  msg = generate_llm(prompt)
136
  send_message(message_id, chat_id, msg)
137
+ except Exception:
138
  send_message(message_id, chat_id, "Error processing your request.")
139
 
140
  def response_audio(message_id, chat_id, prompt):
 
156
  try:
157
  img, path, ret_prompt, url = generate_image(prompt, message_id, message_id, image_dir)
158
  if img:
159
+ # Split ret_prompt into paragraphs and italicize each
160
  formatted_ret_prompt = "\n\n".join(
161
  f"_{paragraph.strip()}_" for paragraph in ret_prompt.split("\n\n") if paragraph.strip()
162
  )
 
172
  logging.error("Error in handle_image_generation: %s", e)
173
  send_message(message_id, chat_id, "Error generating image.")
174
 
175
+ # --- Webhook ---
176
  @app.post("/whatsapp")
177
  async def whatsapp_webhook(request: Request):
178
+ global last_message_time
179
+ # Update last_message_time for each incoming message
180
+ last_message_time = time.time()
181
+
182
  auth = request.headers.get("Authorization", "").strip()
183
  if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
184
  raise HTTPException(403, "Unauthorized")
 
190
  return {"success": True}
191
 
192
  logging.debug("recv: %s", data)
193
+ sd = data["senderData"]
194
  chat = sd["chatId"]
195
+ mid = data["idMessage"]
196
  sender_jid = sd.get("sender")
197
 
198
  md = data.get("messageData", {})
 
199
  if md.get("typeMessage") == "quotedMessage" or "quotedMessage" in md:
200
  logging.debug("skip native quotedMessage")
201
  return {"success": True}
202
 
 
203
  if "textMessageData" in md:
204
+ body = md["textMessageData"].get("textMessage", "").strip()
205
+ ctx = md["textMessageData"].get("contextInfo", {})
206
  elif "extendedTextMessageData" in md:
207
+ body = md["extendedTextMessageData"].get("text", "").strip()
208
+ ctx = md["extendedTextMessageData"].get("contextInfo", {})
209
  else:
210
  return {"success": True}
211
 
 
212
  if ctx.get("mentionedJid") or ctx.get("mentionedJidList"):
213
  return {"success": True}
214
  if chat.endswith("@g.us") and re.search(r"@\d+", body):
215
  return {"success": True}
216
 
 
217
  low = body.lower()
218
 
219
+ # --- New Commands ---
220
  if low == "/help":
221
  help_text = (
222
+ "🤖 *Hi there, I'm Eve!* Here are the commands you can use:\n\n"
223
+ "• `/help` – Show this help message.\n"
224
+ "• `/summarize <text>` – Get a quick summary of your text.\n"
225
+ "• `/translate <language>|<text>` – Translate text to your chosen language.\n"
226
+ "• `/joke` – Enjoy a random, funny joke.\n"
227
+ "• `/weather <location>` – Get the current weather for a location.\n"
228
+ "• `/inspire` – Receive a short inspirational quote.\n"
229
+ "• `/trivia` – Start a new trivia question.\n"
230
+ "• `/answer` – Reveal the answer to the trivia.\n"
231
+ "• `/meme <text>` – Generate a fun meme image.\n"
232
+ "• `/poll <Question>|<Option1>|<Option2>|…` – Create a poll.\n"
233
+ "• `/results` – See current poll results.\n"
234
+ "• `/endpoll` – End the poll and show final results.\n"
235
+ "• `/imagine <prompt>` – Generate an image from your prompt.\n\n"
236
+ "If you send any other text, I'll reply with a voice message. I'm here to help, so don't hesitate to ask!"
237
  )
238
  send_message(mid, chat, help_text)
239
  return {"success": True}
240
 
 
241
  if low.startswith("/summarize "):
242
  txt = body[len("/summarize "):].strip()
243
  summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{txt}")
244
  send_message(mid, chat, summary)
245
  return {"success": True}
246
 
 
247
  if low.startswith("/translate "):
248
  part = body[len("/translate "):]
249
  if "|" not in part:
250
+ send_message(mid, chat, "Please use `/translate <language>|<text>`")
251
  else:
252
+ lang, txt = part.split("|", 1)
253
  resp = generate_llm(f"Translate the following into {lang.strip()}:\n\n{txt.strip()}")
254
  send_message(mid, chat, resp)
255
  return {"success": True}
256
 
 
257
  if low == "/joke":
258
  try:
259
  joke = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
 
262
  send_message(mid, chat, generate_llm("Tell me a short, funny joke."))
263
  return {"success": True}
264
 
 
265
  if low.startswith("/weather "):
266
  loc = body[len("/weather "):].strip().replace(" ", "+")
267
  try:
 
271
  send_message(mid, chat, "Could not fetch weather.")
272
  return {"success": True}
273
 
 
274
  if low == "/inspire":
275
  quote = generate_llm("Give me a short inspirational quote.")
276
send_message(mid, chat, f"✨ {quote}")
277
  return {"success": True}
278
 
 
279
  if low == "/trivia":
280
  raw = generate_llm(
281
  "Generate a trivia question and answer in JSON: "
 
284
  try:
285
  obj = json.loads(raw)
286
  trivia_store[chat] = obj
287
+ send_message(mid, chat, f"❓ {obj['question']}\nReply with `/answer` to see the answer.")
288
  except:
289
  send_message(mid, chat, "Failed to generate trivia.")
290
  return {"success": True}
291
 
 
292
  if low == "/answer":
293
  if chat in trivia_store:
294
  ans = trivia_store.pop(chat)["answer"]
295
send_message(mid, chat, f"💡 Answer: {ans}")
296
  else:
297
+ send_message(mid, chat, "No active trivia. Send `/trivia` to start one.")
298
  return {"success": True}
299
 
 
300
  if low.startswith("/meme "):
301
  txt = body[len("/meme "):].strip()
302
send_message(mid, chat, "🎨 Generating your meme...")
 
308
  })
309
  return {"success": True}
310
 
 
311
  if low.startswith("/poll "):
312
  parts = body[len("/poll "):].split("|")
313
  if len(parts) < 3:
314
+ send_message(mid, chat, "Please use `/poll Question|Option1|Option2|...`")
315
  else:
316
  q = parts[0].strip()
317
  opts = [p.strip() for p in parts[1:]]
318
  votes = {i+1: 0 for i in range(len(opts))}
319
  polls[chat] = {"question": q, "options": opts, "votes": votes, "voters": {}}
320
txt = f"📊 *Poll:* {q}\n" + "\n".join(
321
+ f"{i+1}. {opt}" for i, opt in enumerate(opts)
322
  ) + "\n\nReply with the *option number* to vote."
323
  send_message(mid, chat, txt)
324
  return {"success": True}
325
 
 
326
  if chat in polls and body.isdigit():
327
  n = int(body)
328
  p = polls[chat]
 
335
send_message(mid, chat, f"✅ Vote recorded: {p['options'][n-1]}")
336
  return {"success": True}
337
 
 
338
  if low == "/results":
339
  if chat in polls:
340
  p = polls[chat]
341
txt = f"📊 *Results:* {p['question']}\n" + "\n".join(
342
+ f"{i}. {opt}: {p['votes'][i]}" for i, opt in enumerate([""] + p["options"]) if i > 0
343
  )
344
  send_message(mid, chat, txt)
345
  else:
346
  send_message(mid, chat, "No active poll.")
347
  return {"success": True}
348
 
 
349
  if low == "/endpoll":
350
  if chat in polls:
351
  p = polls.pop(chat)
352
  txt = f"πŸ“Š *Final Results:* {p['question']}\n" + "\n".join(
353
+ f"{i}. {opt}: {p['votes'][i]}" for i, opt in enumerate([""] + p["options"]) if i > 0
354
  )
355
  send_message(mid, chat, txt)
356
  else:
357
  send_message(mid, chat, "No active poll.")
358
  return {"success": True}
359
 
 
360
  if low.startswith("/imagine"):
361
  prompt = body[len("/imagine"):].strip()
362
  if not prompt:
363
+ send_message(mid, chat, "Please use `/imagine <prompt>` to generate an image.")
364
  else:
365
+ send_message(mid, chat, "✨ Your image is being generated. Please wait...")
366
  task_queue.put({
367
  "type": "image",
368
  "message_id": mid,
 
371
  })
372
  return {"success": True}
373
 
374
+ # Fallback: voice reply for any other text
375
  task_queue.put({
376
  "type": "audio",
377
  "message_id": mid,
 
385
  return "Server is running!"
386
 
387
  if __name__ == "__main__":
388
+ # Send startup message on launch
389
+ send_startup_message()
390
  import uvicorn
391
  uvicorn.run(app, host="0.0.0.0", port=7860)
config.yaml CHANGED
@@ -3,7 +3,7 @@ config:
3
  model: koboldcpp/HF_SPACE_Tiefighter-13B
4
  system_prompt: |-
5
  You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
6
- You generate images and voice replies, and support these commands:
7
• /help — list all commands
8
• /imagine <prompt> — generate an image
9
• /summarize <text>
@@ -17,7 +17,7 @@ config:
17
• /poll <Q>|<opt1>|<opt2>|…
18
• /results
19
• /endpoll
20
- Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it. For any other message, respond via voice.
21
  char: Eve
22
 
23
  SD:
 
3
  model: koboldcpp/HF_SPACE_Tiefighter-13B
4
  system_prompt: |-
5
  You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
6
+ You generate images and voice, text replies, and support these commands:
7
• /help — list all commands
8
• /imagine <prompt> — generate an image
9
• /summarize <text>

17
• /poll <Q>|<opt1>|<opt2>|…
18
• /results
19
• /endpoll
20
+ Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it. For any other message, respond via voice or text.
21
  char: Eve
22
 
23
  SD:
polLLM.py ADDED
@@ -0,0 +1,37 @@
1
+ import os
2
+ import requests
3
+ import urllib.parse
4
+ from dotenv import load_dotenv
5
+ from utils import read_config
6
+ import random
7
+
8
+ load_dotenv()
9
+
10
+ def pre_process():
11
+ # Read the configuration and substitute the character placeholder
12
+ config = read_config()
13
+ system_prompt = config['llm']['system_prompt']
14
+ char = config['llm']['char']
15
+ return system_prompt.replace("{char}", char)
16
+
17
+ def generate_llm(prompt):
18
+ system_prompt = pre_process()
19
+ # Encode the user prompt and system prompt for URL safety
20
+ encoded_prompt = urllib.parse.quote(prompt)
21
+ encoded_system = urllib.parse.quote(system_prompt)
22
+ # Build the GET request URL for Pollinations' text API
23
+ randomSeed = random.randint(0, 999999)
24
+ url = f"https://text.pollinations.ai/{encoded_prompt}?model=openai&private=true&seed={randomSeed}&system={encoded_system}"
25
+
26
+ try:
27
+ response = requests.get(url, timeout=30)
28
+ response.raise_for_status()
29
+ # Return the generated text (stripping any extra whitespace)
30
+ return response.text.strip()
31
+ except Exception as e:
32
+ return f"Error: {str(e)}"
33
+
34
+ # Example usage (can be removed or commented out in production):
35
+ if __name__ == "__main__":
36
+ sample_prompt = "What is the capital of France?"
37
+ print("Response:", generate_llm(sample_prompt))
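As a usage note, app.py now consumes this module via "from polLLM import generate_llm" in place of the old "from llm import generate_llm". A minimal sketch of that call path, mirroring the response_text helper in app.py and assuming polLLM.py plus its config and .env are in place:

from polLLM import generate_llm

def reply_text(prompt):
    # Mirrors app.py's response_text(): ask the LLM, fall back to a generic error string.
    try:
        return generate_llm(prompt)
    except Exception:
        return "Error processing your request."

if __name__ == "__main__":
    print(reply_text("Give me a short inspirational quote."))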