Tonic committed on
Commit
9f1220f
·
unverified ·
1 Parent(s): 397b7de

add persistence

Browse files
Files changed (1) hide show
  1. app.py +27 -27
app.py CHANGED
@@ -3,6 +3,12 @@ import discord
3
  from gradio_client import Client
4
  from huggingface_hub import InferenceClient
5
  import os
 
 
 
 
 
 
6
 
7
  # Get tokens from environment variables (Hugging Face Spaces secrets)
8
  TOKEN = os.getenv("DISCORD_TOKEN")
@@ -25,11 +31,7 @@ hf_client = InferenceClient(api_key=HF_TOKEN)
25
  # Function to process message and get response
26
  async def get_ai_response(message_content):
27
  try:
28
- messages = [
29
- {"role": "user", "content": message_content}
30
- ]
31
-
32
- # Create a streaming response
33
  response = ""
34
  stream = hf_client.chat.completions.create(
35
  model="Qwen/Qwen2.5-72B-Instruct",
@@ -39,44 +41,29 @@ async def get_ai_response(message_content):
39
  top_p=0.7,
40
  stream=True
41
  )
42
-
43
- # Collect the streamed response
44
  for chunk in stream:
45
  content = chunk.choices[0].delta.content
46
  if content:
47
  response += content
48
-
49
  return response if response else "I couldn't generate a response."
50
-
51
  except Exception as e:
52
  return f"An error occurred: {str(e)}"
53
 
54
  @client.event
55
  async def on_ready():
56
- print(f'We have logged in as {client.user}')
57
 
58
  @client.event
59
  async def on_message(message):
60
- # Ignore messages from the bot itself
61
  if message.author == client.user:
62
  return
63
-
64
- # Check if the bot is mentioned in the message
65
  if client.user in message.mentions:
66
- # Extract the message content without the bot mention
67
  clean_message = message.content.replace(f"<@{client.user.id}>", "").strip()
68
-
69
  if not clean_message:
70
  await message.channel.send("Please provide some text for me to respond to!")
71
  return
72
-
73
- # Send initial response to show bot is processing
74
  processing_message = await message.channel.send("Processing your request...")
75
-
76
- # Get AI response
77
  response = await get_ai_response(clean_message)
78
-
79
- # Split response if it's too long for Discord's 2000 character limit
80
  if len(response) > 2000:
81
  chunks = [response[i:i+2000] for i in range(0, len(response), 2000)]
82
  await processing_message.delete()
@@ -85,21 +72,34 @@ async def on_message(message):
85
  else:
86
  await processing_message.edit(content=response)
87
 
88
- # Error handling for connection issues
89
  @client.event
90
  async def on_error(event, *args, **kwargs):
91
- print(f"An error occurred: {event}")
92
  with open('error.log', 'a') as f:
93
  f.write(f"{event}\n")
94
 
95
- # Run the bot
96
- def main():
97
  try:
 
98
  client.run(TOKEN)
99
  except Exception as e:
100
- print(f"Failed to start bot: {e}")
101
  with open('error.log', 'a') as f:
102
  f.write(f"Failed to start bot: {e}\n")
103
 
 
 
 
 
 
 
 
104
  if __name__ == "__main__":
105
- main()
 
 
 
 
 
 
 
import logging
import os
import threading
import traceback

import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
9
# Set up logging
# Root-logger config: INFO level, "[timestamp] [LEVEL] message" lines.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')
12
 
13
  # Get tokens from environment variables (Hugging Face Spaces secrets)
14
  TOKEN = os.getenv("DISCORD_TOKEN")
 
31
  # Function to process message and get response
32
  async def get_ai_response(message_content):
33
  try:
34
+ messages = [{"role": "user", "content": message_content}]
 
 
 
 
35
  response = ""
36
  stream = hf_client.chat.completions.create(
37
  model="Qwen/Qwen2.5-72B-Instruct",
 
41
  top_p=0.7,
42
  stream=True
43
  )
 
 
44
  for chunk in stream:
45
  content = chunk.choices[0].delta.content
46
  if content:
47
  response += content
 
48
  return response if response else "I couldn't generate a response."
 
49
  except Exception as e:
50
  return f"An error occurred: {str(e)}"
51
 
52
@client.event
async def on_ready():
    """Called once the Discord gateway handshake completes and the bot is online."""
    # Lazy %-args: logging formats the message only if INFO is enabled,
    # instead of eagerly building an f-string on every call.
    logging.info('We have logged in as %s', client.user)
55
 
56
  @client.event
57
  async def on_message(message):
 
58
  if message.author == client.user:
59
  return
 
 
60
  if client.user in message.mentions:
 
61
  clean_message = message.content.replace(f"<@{client.user.id}>", "").strip()
 
62
  if not clean_message:
63
  await message.channel.send("Please provide some text for me to respond to!")
64
  return
 
 
65
  processing_message = await message.channel.send("Processing your request...")
 
 
66
  response = await get_ai_response(clean_message)
 
 
67
  if len(response) > 2000:
68
  chunks = [response[i:i+2000] for i in range(0, len(response), 2000)]
69
  await processing_message.delete()
 
72
  else:
73
  await processing_message.edit(content=response)
74
 
 
75
@client.event
async def on_error(event, *args, **kwargs):
    """Global discord.py error hook: log the failing event and persist details.

    Args:
        event: Name of the event whose handler raised.
        *args, **kwargs: Arguments the failing handler received.
    """
    # Lazy %-args instead of an eagerly-formatted f-string.
    logging.error("An error occurred: %s", event)
    with open('error.log', 'a') as f:
        f.write(f"{event}\n")
        # discord.py invokes on_error from inside an except block, so the
        # active exception is retrievable here — persist its traceback too;
        # the event name alone is not enough to debug a failure.
        f.write(traceback.format_exc())
80
 
81
# Function to run the Discord bot in a separate thread
def run_discord_bot():
    """Start the blocking discord.py client loop, recording any startup failure.

    Failures are both logged and appended to error.log so they survive
    process restarts.
    """
    try:
        logging.info("Starting the Discord bot...")
        # client.run blocks until the client disconnects or fails.
        client.run(TOKEN)
    except Exception as e:
        # Broad catch is acceptable at this top-level boundary: record the
        # failure instead of silently killing the daemon thread.
        logging.error("Failed to start bot: %s", e)
        with open('error.log', 'a') as f:
            f.write(f"Failed to start bot: {e}\n")
90
 
91
# Minimal Gradio UI whose only job is to keep the HF Space's web process alive.
def create_interface():
    """Return a Gradio Blocks app that displays the bot's invite link."""
    bot_invite = (
        "Add this bot to your server by following this URL: "
        "https://discord.com/oauth2/authorize?client_id=1347942347077582880"
        "&permissions=377957238784&integration_type=0&scope=bot"
    )
    with gr.Blocks(title="Discord Bot Invite") as demo:
        gr.Markdown("# Discord Bot\n" + bot_invite)
    return demo
97
+
98
if __name__ == "__main__":
    # Run the Discord client on a daemon thread so Gradio can own the main
    # thread (daemon=True lets the process exit when Gradio stops).
    worker = threading.Thread(target=run_discord_bot, daemon=True)
    worker.start()

    # Serve the keep-alive UI on the standard HF Spaces port; this blocks.
    demo = create_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860)