Chandima Prabhath committed on
Commit
7995625
·
1 Parent(s): e3c4ecf

Refactor system prompt for clarity and conciseness; update environment variable handling and improve error logging in app.py

Files changed (2)
  1. app.py +256 -120
  2. config.yaml +19 -10
app.py CHANGED
@@ -4,230 +4,366 @@ import requests
  import logging
  import queue
  import re
  from fastapi import FastAPI, Request, HTTPException
  from fastapi.responses import PlainTextResponse, JSONResponse
  from FLUX import generate_image
  from VoiceReply import generate_voice_reply
  from llm import generate_llm

- # Configure logging for debugging
  logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")

- # Environment variables
- GREEN_API_URL = os.getenv("GREEN_API_URL")
  GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
- GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
- WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
- image_dir = "/tmp/images"
- audio_dir = "/tmp/audio"

  if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
      raise ValueError("Environment variables are not set properly")

- # Task queue for processing messages sequentially
- task_queue = queue.Queue()

  app = FastAPI()

- # Worker thread to process queued tasks one by one
  def worker():
      while True:
          task = task_queue.get()
          try:
-             typ = task.get("type")
-             mid = task.get("message_id")
-             cid = task.get("chat_id")
              if typ == "image":
-                 handle_image_generation(mid, cid, task.get("prompt"))
              elif typ == "audio":
-                 response_audio(mid, cid, task.get("prompt"))
          except Exception as e:
-             logging.error(f"Error processing task {task}: {e}")
          finally:
              task_queue.task_done()

- # Start the worker thread
  threading.Thread(target=worker, daemon=True).start()

  def send_message(message_id, to_number, message, retries=3):
-     chat_id = to_number if to_number.endswith('@g.us') else to_number
-     url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
-     payload = {
-         "chatId": chat_id,
-         "message": message,
-         "quotedMessageId": message_id,
-     }
-     for attempt in range(retries):
          try:
              r = requests.post(url, json=payload)
              r.raise_for_status()
              return r.json()
          except requests.RequestException as e:
-             if attempt < retries - 1:
-                 continue
-             return {"error": str(e)}

  def send_image(message_id, to_number, image_path, retries=3):
-     chat_id = to_number if to_number.endswith('@g.us') else to_number
-     url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
-     payload = {'chatId': chat_id, 'caption': 'Here you go!', 'quotedMessageId': message_id}
-     files = [('file', ('image.jpg', open(image_path, 'rb'), 'image/jpeg'))]
-     for attempt in range(retries):
          try:
              r = requests.post(url, data=payload, files=files)
              r.raise_for_status()
              return r.json()
          except requests.RequestException as e:
-             if attempt < retries - 1:
-                 continue
-             return {"error": str(e)}

  def send_audio(message_id, to_number, audio_path, retries=3):
-     logging.debug("Entering send_audio")
-     chat_id = to_number if to_number.endswith('@g.us') else to_number
      if not os.path.exists(audio_path):
-         logging.debug(f"Audio file does not exist: {audio_path}")
-     url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
-     payload = {'chatId': chat_id, 'caption': 'Here is your voice reply!', 'quotedMessageId': message_id}
      try:
-         with open(audio_path, 'rb') as f:
-             files = [('file', ('audio.mp3', f, 'audio/mpeg'))]
-             for attempt in range(retries):
                  try:
-                     logging.debug(f"Attempt {attempt+1} to send audio")
                      r = requests.post(url, data=payload, files=files)
-                     logging.debug(f"send_audio response: {r.status_code} {r.text}")
                      r.raise_for_status()
                      return r.json()
                  except requests.RequestException as e:
-                     logging.debug(f"send_audio error on attempt {attempt+1}: {e}")
-                     if attempt < retries - 1:
-                         continue
-                     return {"error": str(e)}
      except Exception as e:
-         logging.debug(f"Failed to open audio file: {e}")
          return {"error": str(e)}

  def response_text(message_id, chat_id, prompt):
      try:
          msg = generate_llm(prompt)
          send_message(message_id, chat_id, msg)
-     except Exception:
-         send_message(message_id, chat_id, "There was an error processing your request.")

  def response_audio(message_id, chat_id, prompt):
-     logging.debug("Entering response_audio with prompt: %s", prompt)
      try:
          result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=audio_dir)
-         logging.debug("generate_voice_reply result: %s", result)
          if result and result[0]:
              audio_path, _ = result
              send_audio(message_id, chat_id, audio_path)
              if os.path.exists(audio_path):
                  os.remove(audio_path)
-                 logging.debug("Removed audio file: %s", audio_path)
          else:
-             logging.debug("Falling back to text response")
              response_text(message_id, chat_id, prompt)
      except Exception as e:
-         logging.debug("Exception in response_audio: %s", e)
-         send_message(message_id, chat_id, "There was an error generating the audio. Please try again later.")

  def handle_image_generation(message_id, chat_id, prompt):
      try:
-         image, image_path, returned_prompt, image_url = generate_image(prompt, message_id, message_id, image_dir)
-         if image:
-             send_image(message_id, chat_id, image_path)
              send_message(
                  message_id, chat_id,
-                 f"Image generated successfully! View it here: {image_url}\n>{chr(8203)} _{returned_prompt}_"
              )
          else:
-             send_message(message_id, chat_id, "Failed to generate image. Please try again later.")
-     except Exception:
-         send_message(message_id, chat_id, "There was an error generating the image. Please try again later.")
-
- @app.get("/", response_class=PlainTextResponse)
- def index():
-     return "Server is running!"

  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
-     # 1) Auth
-     auth = request.headers.get('Authorization', '').strip()
      if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
          raise HTTPException(403, "Unauthorized")
-
-     # 2) Parse JSON
      try:
          data = await request.json()
      except:
          return JSONResponse({"error": "Invalid JSON"}, status_code=400)
-
-     # 3) Only handle incoming messages
-     if data.get('typeWebhook') != 'incomingMessageReceived':
          return {"success": True}

-     logging.debug(f"Received data: {data}")

-     # 4) Extract core fields
-     try:
-         chat_id = data['senderData']['chatId']
-         message_id = data['idMessage']
-         message_data = data.get('messageData', {})
-     except KeyError as e:
-         return JSONResponse({"error": f"Missing key: {e}"}, status_code=200)
-
-     # --- NEW: IGNORE any WhatsApp “quotedMessage” payload entirely ---
-     if message_data.get('typeMessage') == 'quotedMessage' or 'quotedMessage' in message_data:
-         logging.debug("Ignoring WhatsApp quotedMessage payload")
-         return {"success": True}
-     # --------------------------------------------------------------------
-
-     # 5) Extract text body
-     if 'textMessageData' in message_data:
-         body = message_data['textMessageData'].get('textMessage', '').strip()
-         ctx = message_data['textMessageData'].get('contextInfo', {})
-     elif 'extendedTextMessageData' in message_data:
-         body = message_data['extendedTextMessageData'].get('text', '').strip()
-         ctx = message_data['extendedTextMessageData'].get('contextInfo', {})
      else:
          return {"success": True}

-     # 6) IGNORE structured mentions
-     if ctx.get('mentionedJid') or ctx.get('mentionedJidList'):
-         logging.debug(f"Ignoring structured mention: {ctx.get('mentionedJid') or ctx.get('mentionedJidList')}")
          return {"success": True}

-     # 7) IGNORE plain-text "@1234..." mentions in groups
-     if chat_id.endswith('@g.us') and re.search(r'@\d+', body):
-         logging.debug(f"Ignoring plain-text mention in body: {body}")
          return {"success": True}

-     # 8) Enqueue work
-     if body.lower().startswith('/imagine'):
-         prompt = body[len('/imagine'):].strip()
          if not prompt:
-             send_message(message_id, chat_id, "Please provide a prompt after /imagine.")
          else:
-             send_message(message_id, chat_id, "Generating...")
              task_queue.put({
                  "type": "image",
-                 "message_id": message_id,
-                 "chat_id": chat_id,
                  "prompt": prompt
              })
-     else:
-         task_queue.put({
-             "type": "audio",
-             "message_id": message_id,
-             "chat_id": chat_id,
-             "prompt": body
-         })

      return {"success": True}

- if __name__ == '__main__':
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860)

  import logging
  import queue
  import re
+ import json
  from fastapi import FastAPI, Request, HTTPException
  from fastapi.responses import PlainTextResponse, JSONResponse
  from FLUX import generate_image
  from VoiceReply import generate_voice_reply
  from llm import generate_llm

+ # Configure logging
  logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s")

+ # Env vars
+ GREEN_API_URL = os.getenv("GREEN_API_URL")
  GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
+ GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
  GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
+ WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
+ image_dir = "/tmp/images"
+ audio_dir = "/tmp/audio"

  if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
      raise ValueError("Environment variables are not set properly")

+ # Queues & in-memory stores
+ task_queue = queue.Queue()
+ trivia_store = {} # chat_id → {"question":…, "answer":…}
+ polls = {} # chat_id → {"question":…, "options":[…], "votes":{1:0…}, "voters":{jid:opt}}

  app = FastAPI()

+ # Background worker
  def worker():
      while True:
          task = task_queue.get()
          try:
+             typ = task["type"]
+             mid = task["message_id"]
+             cid = task["chat_id"]
              if typ == "image":
+                 handle_image_generation(mid, cid, task["prompt"])
              elif typ == "audio":
+                 response_audio(mid, cid, task["prompt"])
          except Exception as e:
+             logging.error(f"Error processing {task}: {e}")
          finally:
              task_queue.task_done()

  threading.Thread(target=worker, daemon=True).start()

+ # --- send helpers ---
  def send_message(message_id, to_number, message, retries=3):
+     chat_id = to_number if to_number.endswith("@g.us") else to_number
+     url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
+     payload = {"chatId": chat_id, "message": message, "quotedMessageId": message_id}
+     for i in range(retries):
          try:
              r = requests.post(url, json=payload)
              r.raise_for_status()
              return r.json()
          except requests.RequestException as e:
+             if i == retries-1:
+                 return {"error": str(e)}

  def send_image(message_id, to_number, image_path, retries=3):
+     chat_id = to_number if to_number.endswith("@g.us") else to_number
+     url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
+     payload = {"chatId": chat_id, "caption": "Here you go!", "quotedMessageId": message_id}
+     files = [("file", ("image.jpg", open(image_path, "rb"), "image/jpeg"))]
+     for i in range(retries):
          try:
              r = requests.post(url, data=payload, files=files)
              r.raise_for_status()
              return r.json()
          except requests.RequestException as e:
+             if i == retries-1:
+                 return {"error": str(e)}

  def send_audio(message_id, to_number, audio_path, retries=3):
+     logging.debug("send_audio")
+     chat_id = to_number if to_number.endswith("@g.us") else to_number
      if not os.path.exists(audio_path):
+         logging.debug(f"Missing audio: {audio_path}")
+     url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
+     payload = {"chatId": chat_id, "caption": "Here is your voice reply!", "quotedMessageId": message_id}
      try:
+         with open(audio_path, "rb") as f:
+             files = [("file", ("audio.mp3", f, "audio/mpeg"))]
+             for i in range(retries):
                  try:
                      r = requests.post(url, data=payload, files=files)
                      r.raise_for_status()
                      return r.json()
                  except requests.RequestException as e:
+                     if i == retries-1:
+                         return {"error": str(e)}
      except Exception as e:
          return {"error": str(e)}

+ # --- core response fns ---
  def response_text(message_id, chat_id, prompt):
      try:
          msg = generate_llm(prompt)
          send_message(message_id, chat_id, msg)
+     except:
+         send_message(message_id, chat_id, "Error processing your request.")

  def response_audio(message_id, chat_id, prompt):
+     logging.debug("response_audio prompt=%s", prompt)
      try:
          result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=audio_dir)
          if result and result[0]:
              audio_path, _ = result
              send_audio(message_id, chat_id, audio_path)
              if os.path.exists(audio_path):
                  os.remove(audio_path)
          else:
              response_text(message_id, chat_id, prompt)
      except Exception as e:
+         logging.debug("audio error: %s", e)
+         send_message(message_id, chat_id, "Error generating audio. Try again later.")

  def handle_image_generation(message_id, chat_id, prompt):
      try:
+         img, path, ret_prompt, url = generate_image(prompt, message_id, message_id, image_dir)
+         if img:
+             send_image(message_id, chat_id, path)
              send_message(
                  message_id, chat_id,
+                 f"✅ Image ready: {url}\n>{chr(8203)} _{ret_prompt}_"
              )
          else:
+             send_message(message_id, chat_id, "Image generation failed.")
+     except:
+         send_message(message_id, chat_id, "Error generating image.")

+ # --- webhook ---
  @app.post("/whatsapp")
  async def whatsapp_webhook(request: Request):
+     # auth & parse
+     auth = request.headers.get("Authorization", "").strip()
      if auth != f"Bearer {WEBHOOK_AUTH_TOKEN}":
          raise HTTPException(403, "Unauthorized")
      try:
          data = await request.json()
      except:
          return JSONResponse({"error": "Invalid JSON"}, status_code=400)
+     if data.get("typeWebhook") != "incomingMessageReceived":
          return {"success": True}

+     logging.debug("recv: %s", data)
+     sd = data["senderData"]
+     chat = sd["chatId"]
+     mid = data["idMessage"]
+     sender_jid = sd.get("sender")

+     md = data.get("messageData", {})
+     # drop any WhatsApp native quoted-message event
+     if md.get("typeMessage") == "quotedMessage" or "quotedMessage" in md:
+         logging.debug("skip native quotedMessage")
+         return {"success": True}
+
+     # extract text + contextInfo
+     if "textMessageData" in md:
+         body = md["textMessageData"].get("textMessage","").strip()
+         ctx = md["textMessageData"].get("contextInfo",{})
+     elif "extendedTextMessageData" in md:
+         body = md["extendedTextMessageData"].get("text","").strip()
+         ctx = md["extendedTextMessageData"].get("contextInfo",{})
      else:
          return {"success": True}

+     # ignore native mentions & plain @123
+     if ctx.get("mentionedJid") or ctx.get("mentionedJidList"):
+         return {"success": True}
+     if chat.endswith("@g.us") and re.search(r"@\d+", body):
+         return {"success": True}
+
+     # ——— NEW COMMANDS ———
+     low = body.lower()
+
+     # HELP
+     if low == "/help":
+         help_text = (
+             "🤖 *Commands*: \n"
+             "/help\n"
+             "/summarize <text>\n"
+             "/translate <lang>|<text>\n"
+             "/joke\n"
+             "/weather <location>\n"
+             "/inspire\n"
+             "/trivia → new trivia\n"
+             "/answer → reveal answer\n"
+             "/meme <text>\n"
+             "/poll <Q>|<opt1>|<opt2>|…\n"
+             "/results\n"
+             "/endpoll\n"
+             "/imagine <prompt>\n"
+             "Or just send any text and I'll reply by voice!"
+         )
+         send_message(mid, chat, help_text)
          return {"success": True}

+     # SUMMARIZE
+     if low.startswith("/summarize "):
+         txt = body[len("/summarize "):].strip()
+         summary = generate_llm(f"Summarize this text in one short paragraph:\n\n{txt}")
+         send_message(mid, chat, summary)
+         return {"success": True}
+
+     # TRANSLATE
+     if low.startswith("/translate "):
+         part = body[len("/translate "):]
+         if "|" not in part:
+             send_message(mid, chat, "Use `/translate Language|Text`")
+         else:
+             lang, txt = part.split("|",1)
+             resp = generate_llm(f"Translate the following into {lang.strip()}:\n\n{txt.strip()}")
+             send_message(mid, chat, resp)
+         return {"success": True}
+
+     # JOKE
+     if low == "/joke":
+         try:
+             joke = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
+             send_message(mid, chat, f"{joke['setup']}\n\n{joke['punchline']}")
+         except:
+             send_message(mid, chat, generate_llm("Tell me a short, funny joke."))
+         return {"success": True}
+
+     # WEATHER
+     if low.startswith("/weather "):
+         loc = body[len("/weather "):].strip().replace(" ", "+")
+         try:
+             w = requests.get(f"http://wttr.in/{loc}?format=3", timeout=5).text
+             send_message(mid, chat, w)
+         except:
+             send_message(mid, chat, "Could not fetch weather.")
+         return {"success": True}
+
+     # INSPIRE
+     if low == "/inspire":
+         quote = generate_llm("Give me a short inspirational quote.")
+         send_message(mid, chat, f"✨ {quote}")
+         return {"success": True}
+
+     # TRIVIA
+     if low == "/trivia":
+         raw = generate_llm(
+             "Generate a trivia question and answer in JSON: "
+             "{\"question\":\"...\",\"answer\":\"...\"}"
+         )
+         try:
+             obj = json.loads(raw)
+             trivia_store[chat] = obj
+             send_message(mid, chat, f"❓ {obj['question']}\nReply `/answer` to see the answer.")
+         except:
+             send_message(mid, chat, "Failed to generate trivia.")
          return {"success": True}

+     # ANSWER
+     if low == "/answer":
+         if chat in trivia_store:
+             ans = trivia_store.pop(chat)["answer"]
+             send_message(mid, chat, f"💡 Answer: {ans}")
+         else:
+             send_message(mid, chat, "No active trivia. Send `/trivia`.")
+         return {"success": True}
+
+     # MEME
+     if low.startswith("/meme "):
+         txt = body[len("/meme "):].strip()
+         send_message(mid, chat, "🎨 Generating your meme...")
+         task_queue.put({
+             "type": "image",
+             "message_id": mid,
+             "chat_id": chat,
+             "prompt": f"meme template with text: {txt}"
+         })
+         return {"success": True}
+
+     # POLL
+     if low.startswith("/poll "):
+         parts = body[len("/poll "):].split("|")
+         if len(parts) < 3:
+             send_message(mid, chat, "Use `/poll Question|Option1|Option2[...]`")
+         else:
+             q = parts[0].strip()
+             opts = [p.strip() for p in parts[1:]]
+             votes = {i+1: 0 for i in range(len(opts))}
+             polls[chat] = {"question": q, "options": opts, "votes": votes, "voters": {}}
+             txt = f"📊 *Poll:* {q}\n" + "\n".join(
+                 f"{i+1}. {opt}" for i,opt in enumerate(opts)
+             ) + "\n\nReply with the *option number* to vote."
+             send_message(mid, chat, txt)
+         return {"success": True}
+
+     # VOTE in poll
+     if chat in polls and body.isdigit():
+         n = int(body)
+         p = polls[chat]
+         if 1 <= n <= len(p["options"]):
+             prev = p["voters"].get(sender_jid)
+             if prev:
+                 p["votes"][prev] -= 1
+             p["votes"][n] += 1
+             p["voters"][sender_jid] = n
+             send_message(mid, chat, f"✅ Vote recorded: {p['options'][n-1]}")
+         return {"success": True}
+
+     # POLL RESULTS
+     if low == "/results":
+         if chat in polls:
+             p = polls[chat]
+             txt = f"📊 *Results:* {p['question']}\n" + "\n".join(
+                 f"{i}. {opt}: {p['votes'][i]}" for i,opt in enumerate([""]+p["options"]) if i>0
+             )
+             send_message(mid, chat, txt)
+         else:
+             send_message(mid, chat, "No active poll.")
+         return {"success": True}
+
+     # END POLL
+     if low == "/endpoll":
+         if chat in polls:
+             p = polls.pop(chat)
+             txt = f"📊 *Final Results:* {p['question']}\n" + "\n".join(
+                 f"{i}. {opt}: {p['votes'][i]}" for i,opt in enumerate([""]+p["options"]) if i>0
+             )
+             send_message(mid, chat, txt)
+         else:
+             send_message(mid, chat, "No active poll.")
+         return {"success": True}
+
+     # IMAGINE (existing)
+     if low.startswith("/imagine"):
+         prompt = body[len("/imagine"):].strip()
          if not prompt:
+             send_message(mid, chat, "Use `/imagine <prompt>`")
          else:
+             send_message(mid, chat, "✨ Generating image...")
              task_queue.put({
                  "type": "image",
+                 "message_id": mid,
+                 "chat_id": chat,
                  "prompt": prompt
              })
+         return {"success": True}

+     # fallback → voice reply
+     task_queue.put({
+         "type": "audio",
+         "message_id": mid,
+         "chat_id": chat,
+         "prompt": body
+     })
      return {"success": True}

+ @app.get("/", response_class=PlainTextResponse)
+ def index():
+     return "Server is running!"
+
+ if __name__ == "__main__":
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860)
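
The reworked /whatsapp handler only acts on incomingMessageReceived events that carry a bearer token and a text body. The snippet below is only a sketch of a local smoke test: it assumes the service is running on localhost:7860 with its environment variables configured and WEBHOOK_AUTH_TOKEN set to "secret" (both are assumptions, not part of this commit). The payload mirrors the fields the handler reads (typeWebhook, senderData, idMessage, messageData.textMessageData).

import requests

# Hypothetical test payload; field names follow what whatsapp_webhook reads above.
payload = {
    "typeWebhook": "incomingMessageReceived",
    "senderData": {"chatId": "1234567890@c.us", "sender": "1234567890@c.us"},
    "idMessage": "TEST-001",
    "messageData": {
        "typeMessage": "textMessage",
        "textMessageData": {"textMessage": "/help", "contextInfo": {}},
    },
}

r = requests.post(
    "http://localhost:7860/whatsapp",            # assumed local port from uvicorn.run above
    json=payload,
    headers={"Authorization": "Bearer secret"},  # must match WEBHOOK_AUTH_TOKEN
)
print(r.status_code, r.json())  # expect 200 and {"success": True}
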
config.yaml CHANGED
@@ -1,17 +1,25 @@
  config:
    llm:
      model: koboldcpp/HF_SPACE_Tiefighter-13B
-     system_prompt: |
-       Hey there! I'm {char}, your friendly AI specializing in creating images. You can reach me through various platforms:
-       **Telegram Bot**: Chat with me privately on [Telegram](https://t.me/eve_uni_bot) for personalized image requests.
-       **WhatsApp Group**: Join our [WhatsApp group](https://chat.whatsapp.com/CzPNw1lZKwaGlwi4YBkV2x) to share your image ideas with the community. I'll generate and send images directly to the group chat.
-       **Telegram Group**: Join our creative community on [Telegram](https://t.me/+p-0FdsI00tMxZjI1) for collaborative image requests. Share your ideas, and I'll bring them to life right in the group chat.
-       **How to Request an Image**:
-       Simply use the "/imagine" command followed by your request. For example, "/imagine orange cat, photorealistic". The more details you provide about colors, styles, and any specific elements, the better I can create exactly what you're imagining.
-       If you're unsure how to phrase your request or encounter any issues, don't hesitate to ask for help. I'm here to guide you through the process and make sure you get the images you need.
-       If you make a spelling mistake in your command, I'll gently let you know so you can correct it easily. Feel free to chat with me, ask questions, or just say hello—I'm here to provide friendly responses and support.
-       Whether you're new to image creation or a seasoned user, I'm dedicated to making your experience smooth and enjoyable. Let's work together to create stunning visuals!
      char: Eve
    SD:
      steps: 30
      negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
@@ -20,6 +28,7 @@ config:
      sampler_name: DPM++ 2M
      cfg_scale: 7.0
      imgbb_upload: false
    app:
      whatsapp_bot_enabled: true
      telegram_bot_enabled: true

  config:
    llm:
      model: koboldcpp/HF_SPACE_Tiefighter-13B
+     system_prompt: |-
+       You are {char}, a sweet and helpful AI assistant in Telegram and WhatsApp.
+       You generate images and voice replies, and support these commands:
+       • /help — list all commands
+       • /imagine <prompt> — generate an image
+       • /summarize <text>
+       • /translate <lang>|<text>
+       • /joke
+       • /weather <location>
+       • /inspire
+       • /trivia
+       • /answer
+       • /meme <text>
+       • /poll <Q>|<opt1>|<opt2>|…
+       • /results
+       • /endpoll
+       Use a concise, friendly tone. If a command is malformed, gently ask the user to correct it. For any other message, respond via voice.
      char: Eve
+
    SD:
      steps: 30
      negative_prompt: low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
      sampler_name: DPM++ 2M
      cfg_scale: 7.0
      imgbb_upload: false
+
    app:
      whatsapp_bot_enabled: true
      telegram_bot_enabled: true
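
The {char} placeholder in the new system_prompt is presumably filled with the configured char value ("Eve") when the config is loaded. The repository's actual loader is not part of this diff, so the following is only a sketch using PyYAML.

import yaml

# Sketch only: the real config loader is not shown in this commit.
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)["config"]

llm_cfg = cfg["llm"]
# Substitute the placeholder with the configured character name.
system_prompt = llm_cfg["system_prompt"].replace("{char}", llm_cfg["char"])
print(system_prompt.splitlines()[0])
# -> "You are Eve, a sweet and helpful AI assistant in Telegram and WhatsApp."
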