Niansuh committed on
Commit 979ad29 · verified · 1 parent: 3b1505d

Update main.py

Files changed (1)
  1. main.py +215 -27
main.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import re
 import random
@@ -8,12 +10,13 @@ import logging
 import asyncio
 import time
 from collections import defaultdict
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Union, AsyncGenerator
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel
+from datetime import datetime # Fix for 'datetime' not defined
 
 # Configure logging
 logging.basicConfig(
@@ -38,7 +41,18 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60 # seconds
 RATE_LIMIT_WINDOW = 60 # seconds
 
-class Blackbox:
+# Define ImageResponse and base classes if not defined elsewhere
+class ImageResponse(BaseModel):
+    images: str
+    alt: str
+
+class AsyncGeneratorProvider:
+    pass
+
+class ProviderModelMixin:
+    pass
+
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
@@ -310,6 +324,199 @@ class Blackbox:
         except Exception as e:
             return f"Unexpected error during /api/chat request: {str(e)}"
 
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
     def __init__(self, model: str):
@@ -422,36 +629,17 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
             logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Process the request with actual message content
-        response_content = await Blackbox.generate_response(
+        # Create an asynchronous generator for streaming response
+        generator = Blackbox.create_async_generator(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
             temperature=request.temperature,
             max_tokens=request.max_tokens
        )
 
-        logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
-        return {
-            "id": f"chatcmpl-{uuid.uuid4()}",
-            "object": "chat.completion",
-            "created": int(datetime.now().timestamp()),
-            "model": request.model,
-            "choices": [
-                {
-                    "index": 0,
-                    "message": {
-                        "role": "assistant",
-                        "content": response_content
-                    },
-                    "finish_reason": "stop"
-                }
-            ],
-            "usage": {
-                "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                "completion_tokens": len(response_content.split()),
-                "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
-            },
-        }
+        logger.info(f"Started streaming response for API key: {api_key} | IP: {client_ip}")
+        return StreamingResponse(generator, media_type="text/event-stream")
+
     except ModelNotWorkingException as e:
        logger.warning(f"Model not working: {e} | IP: {client_ip}")
        raise HTTPException(status_code=503, detail=str(e))
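
Note on the new streaming path: StreamingResponse encodes each chunk it pulls from the iterator and expects str or bytes, while create_async_generator can also yield ImageResponse (a pydantic model). Also, the temperature and max_tokens arguments passed by the endpoint are absorbed by **kwargs rather than forwarded; the payload hard-codes maxTokens: 1024 and playgroundTemperature: 0.5. A minimal sketch of a text-normalizing wrapper follows; it is not part of this commit, and the helper name as_text_chunks and the markdown rendering of image chunks are assumptions:

from typing import AsyncGenerator, Union

async def as_text_chunks(
    gen: AsyncGenerator[Union[str, ImageResponse], None]
) -> AsyncGenerator[str, None]:
    # Pass text chunks through unchanged; render ImageResponse chunks as a
    # markdown image link so every chunk is a str that StreamingResponse can
    # encode. (Hypothetical rendering, not the commit's behavior.)
    async for chunk in gen:
        if isinstance(chunk, ImageResponse):
            yield f"![{chunk.alt}]({chunk.images})"
        else:
            yield chunk

With such a wrapper, the endpoint's return line would read StreamingResponse(as_text_chunks(generator), media_type="text/event-stream").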
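
For manual testing, a hedged client-side sketch for consuming the streamed endpoint; the route path /v1/chat/completions, the Authorization header scheme, and the model id are assumptions, since the diff shows neither the route decorator nor the dependency that reads api_key:

import asyncio
import httpx

async def main() -> None:
    payload = {
        "model": "blackboxai",  # hypothetical model id
        "messages": [{"role": "user", "content": "Hello"}],
    }
    headers = {"Authorization": "Bearer YOUR_API_KEY"}  # placeholder credential
    async with httpx.AsyncClient(timeout=None) as client:
        # Stream the response body chunk by chunk as it arrives.
        async with client.stream(
            "POST",
            "http://localhost:8000/v1/chat/completions",
            json=payload,
            headers=headers,
        ) as response:
            response.raise_for_status()
            async for chunk in response.aiter_text():
                print(chunk, end="", flush=True)

asyncio.run(main())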