Niansuh committed on
Commit f9d7486 · verified · 1 Parent(s): 65ea21b

Update main.py

Files changed (1)
  1. main.py +385 -33
main.py CHANGED
@@ -1,5 +1,3 @@
-# main.py
-
 import os
 import re
 import random
@@ -14,13 +12,11 @@ from typing import List, Dict, Any, Optional, AsyncGenerator, Union
 
 from datetime import datetime
 
-from aiohttp import ClientSession, ClientTimeout, ClientError
+from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
 from pydantic import BaseModel
 
-from blackbox import Blackbox, ImageResponse  # Import the new Blackbox class
-
 # Configure logging
 logging.basicConfig(
     level=logging.INFO,
@@ -41,7 +37,6 @@ if not API_KEYS or API_KEYS == ['']:
 # Process available models
 if AVAILABLE_MODELS:
     AVAILABLE_MODELS = [model.strip() for model in AVAILABLE_MODELS.split(',') if model.strip()]
-    Blackbox.models = [model for model in Blackbox.models if model in AVAILABLE_MODELS]
 else:
     AVAILABLE_MODELS = []  # If empty, all models are available
 
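For reference, the AVAILABLE_MODELS handling kept above splits a comma-separated environment variable, trims whitespace, and drops empty entries; a quick sketch with a hypothetical value:

    raw = "gpt-4o, llama-3.1-70b, ,gemini-pro"  # hypothetical env value
    models = [m.strip() for m in raw.split(',') if m.strip()]
    # -> ['gpt-4o', 'llama-3.1-70b', 'gemini-pro']
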
@@ -101,34 +96,361 @@ class ModelNotWorkingException(Exception):
         self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
         super().__init__(self.message)
 
-def calculate_estimated_cost(prompt_tokens: int, completion_tokens: int) -> float:
-    """
-    Calculate the estimated cost based on the number of tokens.
-    Replace the pricing below with your actual pricing model.
-    """
-    # Example pricing: $0.00000268 per token
-    cost_per_token = 0.00000268
-    return round((prompt_tokens + completion_tokens) * cost_per_token, 8)
-
-def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion",
-        "created": int(datetime.now().timestamp()),
-        "model": model,
-        "choices": [
-            {
-                "index": 0,
-                "message": {
-                    "role": "assistant",
-                    "content": content
-                },
-                "finish_reason": finish_reason
-            }
-        ],
-        "usage": None,  # To be filled in non-streaming responses
+# Mock implementations for ImageResponse and to_data_uri
+class ImageResponse:
+    def __init__(self, images: str, alt: str):
+        self.images = images
+        self.alt = alt
+
+def to_data_uri(image: Any) -> str:
+    return "data:image/png;base64,..."  # Replace with actual base64 data
+
+# New Blackbox Class Integration
+class Blackbox:
+    label = "Blackbox AI"
+    url = "https://www.blackbox.ai"
+    api_endpoint = "https://www.blackbox.ai/api/chat"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'blackboxai'
+    image_models = ['ImageGeneration']
+    models = [
+        default_model,
+        'blackboxai-pro',
+        *image_models,
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+        'gpt-4o',
+        'gemini-pro',
+        'gemini-1.5-flash',
+        'claude-sonnet-3.5',
+        'PythonAgent',
+        'JavaAgent',
+        'JavaScriptAgent',
+        'HTMLAgent',
+        'GoogleCloudAgent',
+        'AndroidDeveloper',
+        'SwiftDeveloper',
+        'Next.jsAgent',
+        'MongoDBAgent',
+        'PyTorchAgent',
+        'ReactAgent',
+        'XcodeAgent',
+        'AngularJSAgent',
+    ]
+
+    agentMode = {
+        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+    }
+
+    trendingAgentMode = {
+        "blackboxai": {},
+        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+        'PythonAgent': {'mode': True, 'id': "Python Agent"},
+        'JavaAgent': {'mode': True, 'id': "Java Agent"},
+        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
+        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
+        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
+        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
+        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
+        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
+        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
+        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
+        'ReactAgent': {'mode': True, 'id': "React Agent"},
+        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
+        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
     }
 
+    userSelectedModel = {
+        "gpt-4o": "gpt-4o",
+        "gemini-pro": "gemini-pro",
+        'claude-sonnet-3.5': "claude-sonnet-3.5",
+    }
+
+    model_prefixes = {
+        'gpt-4o': '@GPT-4o',
+        'gemini-pro': '@Gemini-PRO',
+        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+        'PythonAgent': '@Python Agent',
+        'JavaAgent': '@Java Agent',
+        'JavaScriptAgent': '@JavaScript Agent',
+        'HTMLAgent': '@HTML Agent',
+        'GoogleCloudAgent': '@Google Cloud Agent',
+        'AndroidDeveloper': '@Android Developer',
+        'SwiftDeveloper': '@Swift Developer',
+        'Next.jsAgent': '@Next.js Agent',
+        'MongoDBAgent': '@MongoDB Agent',
+        'PyTorchAgent': '@PyTorch Agent',
+        'ReactAgent': '@React Agent',
+        'XcodeAgent': '@Xcode Agent',
+        'AngularJSAgent': '@AngularJS Agent',
+        'blackboxai-pro': '@BLACKBOXAI-PRO',
+        'ImageGeneration': '@Image Generation',
+    }
+
+    model_referers = {
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+    }
+
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "flux": "ImageGeneration",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @staticmethod
+    def generate_random_string(length: int = 7) -> str:
+        characters = string.ascii_letters + string.digits
+        return ''.join(random.choices(characters, k=length))
+
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
+    @staticmethod
+    def clean_response(text: str) -> str:
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+        cleaned_text = re.sub(pattern, '', text)
+        return cleaned_text
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        image: Any = None,
+        image_name: Optional[str] = None,
+        webSearchMode: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            image (Any): Image data, if any.
+            image_name (Optional[str]): Name of the image, if any.
+            webSearchMode (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": webSearchMode,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if webSearchMode:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Sources:**\n"
+                                    for item in sources[:5]:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        source_formatted += f"- [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                # aiohttp's ClientResponseError carries no response body (the
+                # original attempt to read e.response.text() could never
+                # succeed), so only the status and message are reported.
+                yield f"Error {e.status}: {e.message}"
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    # Assuming some side-effect or logging is needed here
+            except ClientResponseError as e:
+                # As above, the error body is unavailable on ClientResponseError;
+                # report status and message only.
+                yield f"Error {e.status}: {e.message}"
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+
 # FastAPI app setup
 app = FastAPI()
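
Since create_async_generator is the heart of this change, a minimal consumption sketch may be useful; the model and prompt below are illustrative, not part of the commit:

    import asyncio

    async def demo() -> None:
        # "claude-3.5-sonnet" resolves via model_aliases to "claude-sonnet-3.5"
        async for chunk in Blackbox.create_async_generator(
            model="claude-3.5-sonnet",
            messages=[{"role": "user", "content": "Hello"}],
            webSearchMode=False,
        ):
            if isinstance(chunk, ImageResponse):
                print("image URL:", chunk.images)
            else:
                print(chunk)

    asyncio.run(demo())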
 
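The to_data_uri stub above returns a hard-coded placeholder. If image input is ever wired up, a conventional implementation (assuming the input is raw PNG bytes; the mime type is assumed, not detected) could look like:

    import base64

    def to_data_uri(image: bytes) -> str:
        # Base64-encode the raw bytes and wrap them in a data URI
        encoded = base64.b64encode(image).decode('ascii')
        return f"data:image/png;base64,{encoded}"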
 
@@ -184,6 +506,34 @@ class ChatRequest(BaseModel):
 class TokenizerRequest(BaseModel):
     text: str
 
+def calculate_estimated_cost(prompt_tokens: int, completion_tokens: int) -> float:
+    """
+    Calculate the estimated cost based on the number of tokens.
+    Replace the pricing below with your actual pricing model.
+    """
+    # Example pricing: $0.00000268 per token
+    cost_per_token = 0.00000268
+    return round((prompt_tokens + completion_tokens) * cost_per_token, 8)
+
+def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
+    return {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion",
+        "created": int(datetime.now().timestamp()),
+        "model": model,
+        "choices": [
+            {
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": content
+                },
+                "finish_reason": finish_reason
+            }
+        ],
+        "usage": None,  # To be filled in non-streaming responses
+    }
+
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     client_ip = req.client.host
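
As a sanity check on the re-added pricing helper: at the example rate of $0.00000268 per token, a hypothetical 1,000-token prompt plus a 500-token completion costs (1000 + 500) * 0.00000268:

    cost = calculate_estimated_cost(prompt_tokens=1000, completion_tokens=500)
    # (1000 + 500) * 0.00000268 = 0.00402 (rounded to 8 decimal places)
    assert cost == 0.00402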
 
@@ -201,8 +551,10 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     # Process the request with actual message content, but don't log it
     async_generator = Blackbox.create_async_generator(
         model=request.model,
-        messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
-        websearch=request.webSearchMode
+        messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
+        image=None,
+        image_name=None,
+        webSearchMode=request.webSearchMode
     )
 
     if request.stream:
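
Taken together, the updated endpoint can be exercised as sketched below. The base URL and the Authorization header scheme are assumptions, since the auth dependency and server setup are outside this diff:

    import requests

    resp = requests.post(
        "http://localhost:8000/v1/chat/completions",  # assumed deployment URL
        headers={"Authorization": "Bearer <api-key>"},  # assumed header format
        json={
            "model": "blackboxai",
            "messages": [{"role": "user", "content": "Hi"}],
            "stream": False,
            "webSearchMode": False,
        },
    )
    print(resp.json())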