import logging
import os
from threading import Thread

import uvicorn
from fastapi import FastAPI
from groq import Groq
from whatsapp_bridge.bot import ApplicationBuilder, MessageHandler, TextFilter, TypeHandler

# Configure logging
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# Initialize Groq client
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# System prompt to define assistant behavior
system_prompt = """
You are Lucy, personal assistant of Naksh.
For the first incoming message, reply: "Hi! I am Lucy, Personal Assistant of Naksh. He is currently busy and will message you soon."
Every message after that should be different, and always be respectful.
From the second message onward you can handle the conversation by yourself; you can even chat with the sender.
If someone asks about me, tell them he is busy with some work.
"""

# Global sender instance
sender = None


class WhatsAppSender:
    def __init__(self, application):
        self.application = application
        self.client = None

    def get_client(self):
        # The underlying WhatsApp client only exists once the application's
        # listener has been created, so resolve it lazily.
        if self.client is None and hasattr(self.application, "listener"):
            self.client = self.application.listener.client
        return self.client

    def send_message(self, jid, message):
        client = self.get_client()
        if client is None:
            return False, "WhatsApp client not available"
        try:
            # Different bridge versions expose different send methods,
            # so try the known names in order.
            for method_name in ["send_message", "send_text", "send"]:
                if hasattr(client, method_name):
                    getattr(client, method_name)(jid, message)
                    return True, "Message sent successfully"
            return False, "No valid send method found on client"
        except Exception as e:
            return False, str(e)


async def ai_handler(update, context):
    global sender
    if not (update.message and update.message.get("content")):
        return

    chat_jid = update.message.get("chat_jid")
    incoming = update.message.get("content")
    logger.info(f"Received from {chat_jid}: '{incoming}'")

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": incoming},
    ]

    try:
        response = client.chat.completions.create(
            messages=messages,
            model="llama-3.3-70b-versatile",
            stream=False,
        )
        ai_text = response.choices[0].message.content
    except Exception as e:
        logger.error(f"Groq AI error: {e}")
        ai_text = "Sorry, I'm having trouble responding right now."

    success, result = sender.send_message(chat_jid, ai_text)
    if success:
        logger.info(f"Sent to {chat_jid}: '{ai_text}'")
    else:
        logger.error(f"Failed to send message to {chat_jid}: {result}")


def cli_send_loop(sender_instance):
    # Simple interactive loop for sending messages manually from the terminal.
    while True:
        try:
            command = input("\nEnter command (/send <jid> <message>): ").strip()
        except EOFError:
            break  # no CLI in non-interactive env

        if command.startswith("/send "):
            parts = command.split(" ", 2)
            if len(parts) < 3:
                print("Invalid format. Use: /send <jid> <message>")
                continue
            _, jid, message = parts
            success, result = sender_instance.send_message(jid, message)
            if success:
                logger.info(f"Message sent to {jid}: '{message}'")
            else:
                logger.error(f"Failed to send: {result}")
                print(f"Error: {result}")


# FastAPI setup
app = FastAPI()


@app.get("/ping")
async def ping():
    return {"message": "pong"}


def run_fastapi(port=7860):
    uvicorn.run(app, host="0.0.0.0", port=port, log_level="info")


def main():
    global sender
    logger.info("Starting WhatsApp AI bot...")

    application = ApplicationBuilder().build()
    sender = WhatsAppSender(application)

    # Log every raw update at debug level, ahead of the main handlers
    application.add_handler(TypeHandler(lambda u, c: logger.debug(f"Update: {u}")), group=-1)
    application.add_handler(MessageHandler(TextFilter(), ai_handler))

    # Manual-send CLI in a background thread
    cli_thread = Thread(target=cli_send_loop, args=(sender,), daemon=True)
    cli_thread.start()

    # Start FastAPI in a separate thread (health-check endpoint)
    fastapi_thread = Thread(target=run_fastapi, daemon=True)
    fastapi_thread.start()

    application.run_polling()


if __name__ == "__main__":
    main()