Niansuh committed
Commit 61526f3 · verified · 1 Parent(s): e2aea7d

Update main.py

Files changed (1)
  1. main.py +98 -76
main.py CHANGED
@@ -11,7 +11,7 @@ from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union
 from datetime import datetime
 
-from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
+from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
@@ -27,18 +27,11 @@ logger = logging.getLogger(__name__)
 # Load environment variables
 API_KEYS = os.getenv('API_KEYS', '').split(',')  # Comma-separated API keys
 RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60'))  # Requests per minute
-AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '')  # Comma-separated available models
 
 if not API_KEYS or API_KEYS == ['']:
     logger.error("No API keys found. Please set the API_KEYS environment variable.")
     raise Exception("API_KEYS environment variable not set.")
 
-# Process available models
-if AVAILABLE_MODELS:
-    AVAILABLE_MODELS = [model.strip() for model in AVAILABLE_MODELS.split(',') if model.strip()]
-else:
-    AVAILABLE_MODELS = []  # If empty, all models are available
-
 # Simple in-memory rate limiter based solely on IP addresses
 rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
@@ -56,11 +49,9 @@ class Blackbox:
     supports_message_history = True
 
     default_model = 'blackboxai'
-    image_models = ['ImageGeneration']
     models = [
         default_model,
         'blackboxai-pro',
-        *image_models,
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
@@ -68,25 +59,9 @@ class Blackbox:
         'gemini-pro',
         'gemini-1.5-flash',
         'claude-sonnet-3.5',
-        'PythonAgent',
-        'JavaAgent',
-        'JavaScriptAgent',
-        'HTMLAgent',
-        'GoogleCloudAgent',
-        'AndroidDeveloper',
-        'SwiftDeveloper',
-        'Next.jsAgent',
-        'MongoDBAgent',
-        'PyTorchAgent',
-        'ReactAgent',
-        'XcodeAgent',
-        'AngularJSAgent',
     ]
 
-    agentMode = {
-        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
-    }
-
+    agentMode = {}
     trendingAgentMode = {
         "blackboxai": {},
         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -94,19 +69,6 @@ class Blackbox:
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
         'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
         'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
-        'PythonAgent': {'mode': True, 'id': "Python Agent"},
-        'JavaAgent': {'mode': True, 'id': "Java Agent"},
-        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
-        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
-        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
-        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
-        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
-        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
-        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
-        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
-        'ReactAgent': {'mode': True, 'id': "React Agent"},
-        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
-        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
     }
 
     userSelectedModel = {
@@ -115,12 +77,23 @@ class Blackbox:
         'claude-sonnet-3.5': "claude-sonnet-3.5",
     }
 
+    model_prefixes = {
+        'gpt-4o': '@GPT-4o',
+        'gemini-pro': '@Gemini-PRO',
+        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+        'blackboxai-pro': '@BLACKBOXAI-PRO',
+    }
+
+    model_referers = {
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+    }
+
     model_aliases = {
-        "gpt-3.5-turbo": "blackboxai",
-        "gpt-4": "gpt-4o",
         "gemini-flash": "gemini-1.5-flash",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
-        "flux": "ImageGeneration",
     }
 
     @classmethod
@@ -137,6 +110,31 @@ class Blackbox:
         characters = string.ascii_letters + string.digits
         return ''.join(random.choices(characters, k=length))
 
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
     @staticmethod
     def clean_response(text: str) -> str:
         pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
@@ -144,39 +142,61 @@
         return cleaned_text
 
     @classmethod
-    async def create_completion(
+    async def generate_response(
         cls,
         model: str,
         messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
         **kwargs
     ) -> str:
-        """
-        Creates a completion using the Blackbox AI API.
-        """
         model = cls.get_model(model)
-        if model is None:
-            logger.error(f"Model {model} is not available.")
-            raise ModelNotWorkingException(model)
-
         chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
 
+        prefix = cls.model_prefixes.get(model, "")
+
         formatted_prompt = ""
         for message in messages:
             role = message.get('role', '').capitalize()
             content = message.get('content', '')
             if role and content:
                 formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
 
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
-        headers = {
-            'Content-Type': 'application/json',
+        common_headers = {
             'accept': '*/*',
             'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
             'origin': cls.url,
-            'referer': f"{cls.url}/?model={model}",
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
         }
 
-        payload = {
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
             "messages": [
                 {
                     "id": chat_id,
@@ -188,8 +208,8 @@ class Blackbox:
             "previewToken": None,
             "userId": None,
             "codeModelMode": True,
-            "agentMode": cls.agentMode.get(model, {}),
-            "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
             "maxTokens": 1024,
@@ -206,15 +226,16 @@ class Blackbox:
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        async with ClientSession() as session:
+        async with ClientSession(headers=common_headers) as session:
            try:
                async with session.post(
                    cls.api_endpoint,
-                    headers=headers,
-                    json=payload
-                ) as response:
-                    response.raise_for_status()
-                    text = await response.text()
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
                    cleaned_response = cls.clean_response(text)
                    return cleaned_response
            except ClientResponseError as e:
@@ -225,9 +246,9 @@ class Blackbox:
                        error_text += f" - {cleaned_error}"
                    except Exception:
                        pass
-                raise HTTPException(status_code=e.status, detail=error_text)
+                return error_text
            except Exception as e:
-                raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
+                return f"Unexpected error during /api/chat request: {str(e)}"
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -261,7 +282,7 @@ async def rate_limiter_per_ip(request: Request):
     else:
         if rate_limit_store[client_ip]["count"] >= RATE_LIMIT:
             logger.warning(f"Rate limit exceeded for IP address: {client_ip}")
-            raise HTTPException(status_code=429, detail='Rate limit exceeded')
+            raise HTTPException(status_code=429, detail='Rate limit exceeded for IP address | NiansuhAI')
     rate_limit_store[client_ip]["count"] += 1
 
 async def get_api_key(request: Request, authorization: str = Header(None)) -> str:
@@ -328,7 +349,7 @@ class ChatRequest(BaseModel):
     user: Optional[str] = None
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
-async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):  # Make sure this function is async
+async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     client_ip = req.client.host
     # Redact user messages only for logging purposes
     redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
@@ -341,10 +362,12 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Process the request with actual message content, but don't log it
-        response_content = await Blackbox.create_completion(  # Correct usage of await inside async function
+        # Process the request with actual message content
+        response_content = await Blackbox.generate_response(
            model=request.model,
-            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
+            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
        )
 
        logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
@@ -355,19 +378,19 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
            "model": request.model,
            "choices": [
                {
+                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response_content
                    },
-                    "finish_reason": "stop",
-                    "index": 0
+                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
                "completion_tokens": len(response_content.split()),
                "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
-            }
+            },
        }
    except ModelNotWorkingException as e:
        logger.warning(f"Model not working: {e} | IP: {client_ip}")
@@ -379,7 +402,6 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
        logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.")
        raise HTTPException(status_code=500, detail=str(e))
 
-
 # Endpoint: GET /v1/models
 @app.get("/v1/models", dependencies=[Depends(rate_limiter_per_ip)])
 async def get_models(req: Request):
@@ -413,4 +435,4 @@ async def http_exception_handler(request: Request, exc: HTTPException):
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)
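
Usage note: the public surface of the endpoint is unchanged by this commit — POST /v1/chat/completions still takes an OpenAI-style body and an API key. A minimal client sketch against the updated service, assuming it runs locally on port 8000 (as in the __main__ block) and that get_api_key accepts a "Bearer <key>" Authorization header (the header parsing itself is outside this diff); the key and prompt below are placeholders:

import asyncio
from aiohttp import ClientSession

async def main() -> None:
    headers = {"Authorization": "Bearer YOUR_API_KEY"}  # placeholder key; Bearer scheme assumed
    payload = {
        "model": "blackboxai",  # any entry in Blackbox.models
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    async with ClientSession() as session:
        async with session.post(
            "http://localhost:8000/v1/chat/completions",
            json=payload,
            headers=headers,
        ) as resp:
            data = await resp.json()
            # chat_completions returns an OpenAI-style body; the reply text
            # sits under choices[0].message.content
            print(data["choices"][0]["message"]["content"])

asyncio.run(main())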
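Configuration note: with AVAILABLE_MODELS removed, the service now reads only API_KEYS (comma-separated, required at startup) and RATE_LIMIT (requests per minute per client IP, default 60). A sketch of a local launch under those assumptions, with illustrative values only:

import os

# Variable names come from the diff; the values here are placeholders.
os.environ["API_KEYS"] = "test-key-1,test-key-2"  # comma-separated API keys
os.environ["RATE_LIMIT"] = "120"                  # requests per minute per IP

import uvicorn
from main import app  # set the variables before importing, or startup raises

uvicorn.run(app, host="0.0.0.0", port=8000)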