Niansuh committed on
Commit 7725f9f · verified · 1 Parent(s): f0b1c59

Update main.py

Files changed (1)
  1. main.py +210 -145
main.py CHANGED
@@ -12,7 +12,7 @@ from typing import List, Dict, Any, Optional, AsyncGenerator, Union
 
 from datetime import datetime
 
-from aiohttp import ClientSession, ClientTimeout, ClientError
+from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
 from pydantic import BaseModel
@@ -96,12 +96,13 @@ class ModelNotWorkingException(Exception):
         self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
         super().__init__(self.message)
 
-# Mock implementations for ImageResponse and to_data_uri
-class ImageResponse:
-    def __init__(self, url: str, alt: str):
-        self.url = url
+# Define ImageResponse class
+class ImageResponseData:
+    def __init__(self, images: str, alt: str):
+        self.images = images
         self.alt = alt
 
+# Mock implementation of to_data_uri
 def to_data_uri(image: Any) -> str:
     return "data:image/png;base64,..."  # Replace with actual base64 data
 
@@ -118,6 +119,7 @@ class Blackbox:
     models = [
         default_model,
         'blackboxai-pro',
+        *image_models,
        "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
@@ -138,8 +140,6 @@ class Blackbox:
         'ReactAgent',
         'XcodeAgent',
         'AngularJSAgent',
-        *image_models,
-        'Niansuh',
     ]
 
     # Filter models based on AVAILABLE_MODELS
@@ -201,10 +201,10 @@ class Blackbox:
     }
 
     model_referers = {
-        "blackboxai": f"{url}/?model=blackboxai",
-        "gpt-4o": f"{url}/?model=gpt-4o",
-        "gemini-pro": f"{url}/?model=gemini-pro",
-        "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
     }
 
     model_aliases = {
@@ -215,15 +215,49 @@ class Blackbox:
     }
 
     @classmethod
-    def get_model(cls, model: str) -> Optional[str]:
+    def get_model(cls, model: str) -> str:
         if model in cls.models:
             return model
-        elif model in cls.userSelectedModel and cls.userSelectedModel[model] in cls.models:
-            return cls.userSelectedModel[model]
-        elif model in cls.model_aliases and cls.model_aliases[model] in cls.models:
+        elif model in cls.model_aliases:
             return cls.model_aliases[model]
         else:
-            return cls.default_model if cls.default_model in cls.models else None
+            return cls.default_model
+
+    @staticmethod
+    def generate_random_string(length: int = 7) -> str:
+        characters = string.ascii_letters + string.digits
+        return ''.join(random.choices(characters, k=length))
+
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
+    @staticmethod
+    def clean_response(text: str) -> str:
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+        cleaned_text = re.sub(pattern, '', text)
+        return cleaned_text
 
     @classmethod
     async def create_async_generator(
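As a quick reference, a minimal standalone sketch of what the new helpers produce (not part of the commit; output values are illustrative):

# Standalone sketch mirroring the helpers above; values are illustrative.
import json, random, re, string, uuid

chat_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))  # e.g. 'aB3xK9q'
next_action = uuid.uuid4().hex  # 32-char hex token used for the 'next-action' header

# generate_next_router_state_tree() serializes this nested structure:
router_state = ["", {"children": ["(chat)", {"children": ["__PAGE__", {}]}]}, None, None, True]
print(json.dumps(router_state))
# ["", {"children": ["(chat)", {"children": ["__PAGE__", {}]}]}, null, null, true]

# clean_response() strips a literal version prefix from a response body:
print(re.sub(r'^\$\@\$v=undefined-rv1\$\@\$', '', '$@$v=undefined-rv1$@$Hello'))  # Hello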
@@ -231,73 +265,87 @@ class Blackbox:
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
-        image: Any = None,
-        image_name: Optional[str] = None,
         websearch: bool = False,
         **kwargs
-    ) -> AsyncGenerator[Any, None]:
+    ) -> AsyncGenerator[Union[str, ImageResponseData], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponseData]: Segments of the generated response or ImageResponseData objects.
+        """
         model = cls.get_model(model)
-        if model is None:
-            logger.error(f"Model {model} is not available.")
-            raise ModelNotWorkingException(model)
 
-        logger.info(f"Selected model: {model}")
-
-        if not cls.working or model not in cls.models:
-            logger.error(f"Model {model} is not working or not supported.")
-            raise ModelNotWorkingException(model)
-
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "pragma": "no-cache",
-            "priority": "u=1, i",
-            "referer": cls.model_referers.get(model, cls.url),
-            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
         }
 
-        if model in cls.model_prefixes:
-            prefix = cls.model_prefixes[model]
-            if not messages[0]['content'].startswith(prefix):
-                logger.debug(f"Adding prefix '{prefix}' to the first message.")
-                messages[0]['content'] = f"{prefix} {messages[0]['content']}"
-
-        random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
-        messages[-1]['id'] = random_id
-        messages[-1]['role'] = 'user'
-
-        # Don't log the full message content for privacy
-        logger.debug(f"Generated message ID: {random_id} for model: {model}")
-
-        if image is not None:
-            messages[-1]['data'] = {
-                'fileText': '',
-                'imageBase64': to_data_uri(image),
-                'title': image_name
-            }
-            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
-            logger.debug("Image data added to the message.")
-
-        data = {
-            "messages": messages,
-            "id": random_id,
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
             "previewToken": None,
             "userId": None,
             "codeModelMode": True,
-            "agentMode": {},
-            "trendingAgentMode": {},
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
-            "maxTokens": 99999999,
+            "maxTokens": 1024,
             "playgroundTopP": 0.9,
             "playgroundTemperature": 0.5,
             "isChromeExt": False,
@@ -307,81 +355,102 @@ class Blackbox:
             "clickedForceWebSearch": False,
             "visitFromDelta": False,
             "mobileClient": False,
-            "userSelectedModel": None,
             "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        if model in cls.agentMode:
-            data["agentMode"] = cls.agentMode[model]
-        elif model in cls.trendingAgentMode:
-            data["trendingAgentMode"] = cls.trendingAgentMode[model]
-        elif model in cls.userSelectedModel:
-            data["userSelectedModel"] = cls.userSelectedModel[model]
-        logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).")
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
 
-        timeout = ClientTimeout(total=60)  # Set an appropriate timeout
-        retry_attempts = 10  # Set the number of retry attempts
+        data_chat = '[]'
 
-        for attempt in range(retry_attempts):
+        async with ClientSession(headers=common_headers) as session:
             try:
-                async with ClientSession(headers=headers, timeout=timeout) as session:
-                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                        response.raise_for_status()
-                        logger.info(f"Received response with status {response.status}")
-                        if model == 'ImageGeneration':
-                            response_text = await response.text()
-                            url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
-                            if url_match:
-                                image_url = url_match.group(0)
-                                logger.info(f"Image URL found.")
-                                yield ImageResponse(image_url, alt=messages[-1]['content'])
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponseData(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Sources:**\n"
+                                    for item in sources[:5]:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"- [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
                             else:
-                                logger.error("Image URL not found in the response.")
-                                raise Exception("Image URL not found in the response")
+                                final_response = cleaned_response
                         else:
-                            full_response = ""
-                            search_results_json = ""
-                            try:
-                                async for chunk, _ in response.content.iter_chunks():
-                                    if chunk:
-                                        decoded_chunk = chunk.decode(errors='ignore')
-                                        decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                                        if decoded_chunk.strip():
-                                            if '$~~~$' in decoded_chunk:
-                                                search_results_json += decoded_chunk
-                                            else:
-                                                full_response += decoded_chunk
-                                                yield decoded_chunk
-                                logger.info("Finished streaming response chunks.")
-                            except Exception as e:
-                                logger.exception("Error while iterating over response chunks.")
-                                raise e
-                            if data["webSearchMode"] and search_results_json:
-                                match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
-                                if match:
-                                    try:
-                                        search_results = json.loads(match.group(1))
-                                        formatted_results = "\n\n**Sources:**\n"
-                                        for i, result in enumerate(search_results[:5], 1):
-                                            formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
-                                        logger.info("Formatted search results.")
-                                        yield formatted_results
-                                    except json.JSONDecodeError as je:
-                                        logger.error("Failed to parse search results JSON.")
-                                        raise je
-                break  # Exit the retry loop if successful
-            except ClientError as ce:
-                logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
-                if attempt == retry_attempts - 1:
-                    raise HTTPException(status_code=502, detail="Error communicating with the external API.")
-            except asyncio.TimeoutError:
-                logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
-                if attempt == retry_attempts - 1:
-                    raise HTTPException(status_code=504, detail="External API request timed out.")
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    # No action needed based on the original code
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
             except Exception as e:
-                logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
-                if attempt == retry_attempts - 1:
-                    raise HTTPException(status_code=500, detail=str(e))
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
 
 # FastAPI app setup
 app = FastAPI()
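A minimal standalone sketch of the '$~~~$' source-block parsing added above, assuming the cleaned response embeds a JSON array of sources between two '$~~~$' markers (the sample payload is invented):

# Standalone sketch, not part of the commit; sample payload is invented.
import json
import re

cleaned_response = '$~~~$[{"title": "Example", "link": "https://example.com", "position": 1}]$~~~$The answer text.'
match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
if match:
    sources = json.loads(match.group(1).strip())
    answer_part = cleaned_response[match.end():].strip()
    source_formatted = "**Sources:**\n" + "".join(
        f"- [{item.get('title', 'No Title')}]({item.get('link', '#')})\n" for item in sources[:5]
    )
    print(f"{answer_part}\n\n{source_formatted}")
# The answer text.
#
# **Sources:**
# - [Example](https://example.com)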
@@ -484,9 +553,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
     async_generator = Blackbox.create_async_generator(
         model=request.model,
         messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
-        image=None,
-        image_name=None,
-        webSearchMode=request.webSearchMode
+        websearch=request.websearch
     )
 
     if request.stream:
@@ -494,9 +561,9 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         try:
             assistant_content = ""
             async for chunk in async_generator:
-                if isinstance(chunk, ImageResponse):
+                if isinstance(chunk, ImageResponseData):
                     # Handle image responses if necessary
-                    image_markdown = f"![image]({chunk.url})\n"
+                    image_markdown = f"![image]({chunk.images})\n"
                     assistant_content += image_markdown
                     response_chunk = create_response(image_markdown, request.model, finish_reason=None)
                 else:
@@ -519,9 +586,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
             yield f"data: {json.dumps(response_chunk)}\n\n"
 
             # After all chunks are sent, send the final message with finish_reason
-            # *** Key Correction Starts Here ***
             prompt_tokens = sum(len(msg.content.split()) for msg in request.messages)
-            # *** Key Correction Ends Here ***
             completion_tokens = len(assistant_content.split())
             total_tokens = prompt_tokens + completion_tokens
             estimated_cost = calculate_estimated_cost(prompt_tokens, completion_tokens)
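Note that the token accounting here is a whitespace-split estimate, not a tokenizer count; for example:

# Standalone illustration of the estimate above (values are illustrative).
message_contents = ["Hello there", "How are you today?"]
prompt_tokens = sum(len(content.split()) for content in message_contents)  # 2 + 4 = 6
completion_tokens = len("I am doing well.".split())                        # 4
total_tokens = prompt_tokens + completion_tokens                           # 10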
@@ -562,8 +627,8 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
     else:
         response_content = ""
         async for chunk in async_generator:
-            if isinstance(chunk, ImageResponse):
-                response_content += f"![image]({chunk.url})\n"
+            if isinstance(chunk, ImageResponseData):
+                response_content += f"![image]({chunk.images})\n"
             else:
                 response_content += chunk
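Finally, a hedged sketch of how a caller might consume the reworked generator directly (not part of the commit; assumes the names defined in main.py and a reachable Blackbox backend):

# Consumption sketch, not part of the commit.
import asyncio

async def demo():
    async for chunk in Blackbox.create_async_generator(
        model="blackboxai",
        messages=[{"role": "user", "content": "Hello"}],
        websearch=False,
    ):
        if isinstance(chunk, ImageResponseData):
            print(f"![image]({chunk.images})")
        else:
            print(chunk, end="")

# asyncio.run(demo())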