Niansuh committed
Commit dad805d · verified · 1 Parent(s): 02fd09b

Update main.py

Files changed (1)
  1. main.py +297 -59
main.py CHANGED
@@ -12,7 +12,7 @@ from typing import List, Dict, Any, Optional, Union, AsyncGenerator
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel
 from datetime import datetime
 
@@ -44,6 +44,30 @@ class ImageResponse(BaseModel):
     images: str
     alt: str
 
+# Request Models
+class Message(BaseModel):
+    role: str
+    content: str
+
+class ChatRequest(BaseModel):
+    model: str
+    messages: List[Message]
+    temperature: Optional[float] = 1.0
+    top_p: Optional[float] = 1.0
+    n: Optional[int] = 1
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = 0.0
+    frequency_penalty: Optional[float] = 0.0
+    logit_bias: Optional[Dict[str, float]] = None
+    user: Optional[str] = None
+
+# Custom exception for model not working
+class ModelNotWorkingException(Exception):
+    def __init__(self, model: str):
+        self.model = model
+        self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
+        super().__init__(self.message)
+
 # Updated Blackbox class with new models and functionalities
 class Blackbox:
     label = "Blackbox AI"
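
Review note: with the defaults above, a valid ChatRequest body only needs model and messages. A minimal sketch (the model id is hypothetical, and main.py is assumed importable):

```python
# Minimal ChatRequest construction; every omitted field falls back to the
# defaults declared on the model above.
from main import ChatRequest

req = ChatRequest(
    model="blackboxai",  # hypothetical id; see GET /v1/models for real ones
    messages=[{"role": "user", "content": "Hello!"}],  # coerced to Message
)
print(req.temperature, req.top_p, req.n)  # -> 1.0 1.0 1
```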
@@ -192,7 +216,12 @@ class Blackbox:
     def clean_response(text: str) -> str:
         pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
         cleaned_text = re.sub(pattern, '', text)
-        return cleaned_text
+        try:
+            response_json = json.loads(cleaned_text)
+            # Assuming the response is in {"response": "Your answer here."} format
+            return response_json.get("response", cleaned_text)
+        except json.JSONDecodeError:
+            return cleaned_text
 
     @classmethod
     async def generate_response(
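
Review note: clean_response now also unwraps a {"response": "..."} JSON envelope after stripping the $@$v=undefined-rv1$@$ prefix, and falls back to the raw text when the body is not JSON. A sketch of both paths with hypothetical inputs (main.py assumed importable):

```python
# Both clean_response paths; inputs are hypothetical.
from main import Blackbox

# Prefix stripped, then the JSON envelope is unwrapped.
print(Blackbox.clean_response('$@$v=undefined-rv1$@${"response": "Hello"}'))
# -> Hello

# Non-JSON bodies come back unchanged (apart from the prefix).
print(Blackbox.clean_response('plain text answer'))
# -> plain text answer
```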
@@ -307,12 +336,234 @@ class Blackbox:
             logger.exception(f"Unexpected error during /api/chat request: {str(e)}")
             return f"Unexpected error during /api/chat request: {str(e)}"
 
-    # Custom exception for model not working
-    class ModelNotWorkingException(Exception):
-        def __init__(self, model: str):
-            self.model = model
-            self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
-            super().__init__(self.message)
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": False,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    logger.debug(f"Raw response from Blackbox API: {text}")  # Log raw response
+                    cleaned_response = cls.clean_response(text)
+                    logger.debug(f"Cleaned response: {cleaned_response}")  # Log cleaned response
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                    logger.error(f"Blackbox API ClientResponseError: {error_text}")
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                    logger.error(f"Blackbox API ClientResponseError during chat URL request: {error_text}")
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+
+# FastAPI app setup
+app = FastAPI()
+
+# Add the cleanup task when the app starts
+@app.on_event("startup")
+async def startup_event():
+    asyncio.create_task(cleanup_rate_limit_stores())
+    logger.info("Started rate limit store cleanup task.")
+
+# Middleware to enhance security and enforce Content-Type for specific endpoints
+@app.middleware("http")
+async def security_middleware(request: Request, call_next):
+    client_ip = request.client.host
+    # Enforce that POST requests to /v1/chat/completions must have Content-Type: application/json
+    if request.method == "POST" and request.url.path == "/v1/chat/completions":
+        content_type = request.headers.get("Content-Type")
+        if content_type != "application/json":
+            logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}")
+            return JSONResponse(
+                status_code=400,
+                content={
+                    "error": {
+                        "message": "Content-Type must be application/json",
+                        "type": "invalid_request_error",
+                        "param": None,
+                        "code": None
+                    }
+                },
+            )
+    response = await call_next(request)
+    return response
 
 async def cleanup_rate_limit_stores():
     """
@@ -356,55 +607,6 @@ class Blackbox:
         raise HTTPException(status_code=401, detail='Invalid API key')
     return api_key
 
-# FastAPI app setup
-app = FastAPI()
-
-# Add the cleanup task when the app starts
-@app.on_event("startup")
-async def startup_event():
-    asyncio.create_task(cleanup_rate_limit_stores())
-    logger.info("Started rate limit store cleanup task.")
-
-# Middleware to enhance security and enforce Content-Type for specific endpoints
-@app.middleware("http")
-async def security_middleware(request: Request, call_next):
-    client_ip = request.client.host
-    # Enforce that POST requests to /v1/chat/completions must have Content-Type: application/json
-    if request.method == "POST" and request.url.path == "/v1/chat/completions":
-        content_type = request.headers.get("Content-Type")
-        if content_type != "application/json":
-            logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}")
-            return JSONResponse(
-                status_code=400,
-                content={
-                    "error": {
-                        "message": "Content-Type must be application/json",
-                        "type": "invalid_request_error",
-                        "param": None,
-                        "code": None
-                    }
-                },
-            )
-    response = await call_next(request)
-    return response
-
-# Request Models
-class Message(BaseModel):
-    role: str
-    content: str
-
-class ChatRequest(BaseModel):
-    model: str
-    messages: List[Message]
-    temperature: Optional[float] = 1.0
-    top_p: Optional[float] = 1.0
-    n: Optional[int] = 1
-    max_tokens: Optional[int] = None
-    presence_penalty: Optional[float] = 0.0
-    frequency_penalty: Optional[float] = 0.0
-    logit_bias: Optional[Dict[str, float]] = None
-    user: Optional[str] = None
-
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     client_ip = req.client.host
@@ -459,14 +661,12 @@ class Blackbox:
         logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.")
         raise HTTPException(status_code=500, detail=str(e))
 
-# Endpoint: GET /v1/models
 @app.get("/v1/models", dependencies=[Depends(rate_limiter_per_ip)])
 async def get_models(req: Request):
     client_ip = req.client.host
     logger.info(f"Fetching available models from IP: {client_ip}")
     return {"data": [{"id": model, "object": "model"} for model in Blackbox.models]}
 
-# Endpoint: GET /v1/health
 @app.get("/v1/health", dependencies=[Depends(rate_limiter_per_ip)])
 async def health_check(req: Request):
     client_ip = req.client.host
@@ -490,6 +690,44 @@ class Blackbox:
         },
     )
 
+# Optional: Additional Endpoint for Streaming Responses (Using create_async_generator)
+# This endpoint leverages the new create_async_generator method for streaming responses.
+# Note: Streaming responses may require clients that support Server-Sent Events (SSE) or WebSockets.
+
+@app.post("/v1/chat/completions/stream", dependencies=[Depends(rate_limiter_per_ip)])
+async def chat_completions_stream(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
+    client_ip = req.client.host
+    # Redact user messages only for logging purposes
+    redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
+
+    logger.info(f"Received streaming chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}")
+
+    try:
+        # Validate that the requested model is available
+        if request.model not in Blackbox.models and request.model not in Blackbox.model_aliases:
+            logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
+            raise HTTPException(status_code=400, detail="Requested model is not available.")
+
+        # Create an asynchronous generator for the response
+        async_generator = Blackbox.create_async_generator(
+            model=request.model,
+            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
+        )
+
+        logger.info(f"Started streaming response for API key: {api_key} | IP: {client_ip}")
+        return StreamingResponse(async_generator, media_type="text/event-stream")
+    except ModelNotWorkingException as e:
+        logger.warning(f"Model not working: {e} | IP: {client_ip}")
+        raise HTTPException(status_code=503, detail=str(e))
+    except HTTPException as he:
+        logger.warning(f"HTTPException: {he.detail} | IP: {client_ip}")
+        raise he
+    except Exception as e:
+        logger.exception(f"An unexpected error occurred while processing the streaming chat completions request from IP: {client_ip}.")
+        raise HTTPException(status_code=500, detail=str(e))
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)
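
Since the diff only shows the server side, here is a minimal client sketch for trying the new streaming endpoint. Assumptions not in this commit: the server listens on localhost:8000, "blackboxai" is a valid model id, and get_api_key expects a Bearer token in the Authorization header.

```python
# Hypothetical client for POST /v1/chat/completions/stream; see the
# assumptions in the note above.
import asyncio
from aiohttp import ClientSession

async def stream_chat() -> None:
    payload = {
        "model": "blackboxai",  # hypothetical id; pick one from GET /v1/models
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    headers = {"Authorization": "Bearer YOUR_API_KEY"}  # assumed auth scheme
    async with ClientSession() as session:
        async with session.post(
            "http://localhost:8000/v1/chat/completions/stream",
            json=payload,
            headers=headers,
        ) as resp:
            resp.raise_for_status()
            # Consume the StreamingResponse chunk by chunk as it arrives.
            async for chunk in resp.content.iter_any():
                print(chunk.decode("utf-8", errors="replace"), end="", flush=True)

if __name__ == "__main__":
    asyncio.run(stream_chat())
```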
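
One design note on the new endpoint: StreamingResponse iterates str or bytes chunks, while create_async_generator may also yield ImageResponse objects for image models. If that path matters, a thin adapter like this hypothetical sketch could normalize every item to SSE data: lines before the generator reaches StreamingResponse:

```python
# Hypothetical adapter (not part of this commit): flattens the mixed
# str/ImageResponse stream from create_async_generator into SSE lines.
import json
from typing import AsyncGenerator, Union

async def to_sse(gen: AsyncGenerator[Union[str, "ImageResponse"], None]) -> AsyncGenerator[str, None]:
    async for item in gen:
        if isinstance(item, str):
            yield f"data: {json.dumps({'content': item})}\n\n"
        else:
            # Assumes the ImageResponse pydantic model with images/alt fields.
            yield f"data: {json.dumps({'images': item.images, 'alt': item.alt})}\n\n"
    yield "data: [DONE]\n\n"

# Sketch of use inside chat_completions_stream:
#     return StreamingResponse(to_sse(async_generator), media_type="text/event-stream")
```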