Niansuh committed on
Commit 8f51a3b · verified · 1 Parent(s): 6924925

Update main.py

Files changed (1)
  1. main.py +225 -32
main.py CHANGED
@@ -12,7 +12,7 @@ from typing import List, Dict, Any, Optional, Union, AsyncGenerator
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel
 from datetime import datetime
 
@@ -317,6 +317,199 @@ class Blackbox:
         except Exception as e:
             return f"Unexpected error during /api/chat request: {str(e)}"
 
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponseModel], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponseModel]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponseModel(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                # aiohttp's ClientResponseError carries no response body,
+                # so only the status and message can be reported here.
+                yield f"Error {e.status}: {e.message}"
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+            except ClientResponseError as e:
+                yield f"Error {e.status}: {e.message}"
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
     def __init__(self, model: str):
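
For reference, a minimal sketch of how the new generator might be consumed directly (the model id and prompt are illustrative; it assumes `Blackbox` and `ImageResponseModel` are importable from main.py):

    import asyncio

    async def demo():
        # Each chunk is either a text segment or an ImageResponseModel
        # (for image models), per the method's docstring.
        async for chunk in Blackbox.create_async_generator(
            model="blackboxai",  # illustrative model id
            messages=[{"role": "user", "content": "Hello!"}],
            websearch=False,
        ):
            if isinstance(chunk, ImageResponseModel):
                print("image:", chunk.images)
            else:
                print(chunk)

    asyncio.run(demo())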
@@ -414,6 +607,7 @@ class ChatRequest(BaseModel):
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
+    stream: Optional[bool] = False  # Added stream parameter
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
@@ -421,7 +615,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     # Redact user messages only for logging purposes
     redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
 
-    logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}")
+    logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages} | Stream: {request.stream}")
 
     try:
         # Validate that the requested model is available
@@ -429,36 +623,35 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Process the request with actual message content, but don't log it
-        response_content = await Blackbox.generate_response(
-            model=request.model,
-            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
-            temperature=request.temperature,
-            max_tokens=request.max_tokens
-        )
-
-        logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
-        return {
-            "id": f"chatcmpl-{uuid.uuid4()}",
-            "object": "chat.completion",
-            "created": int(datetime.now().timestamp()),
-            "model": request.model,
-            "choices": [
-                {
-                    "index": 0,
-                    "message": {
-                        "role": "assistant",
-                        "content": response_content
-                    },
-                    "finish_reason": "stop"
-                }
-            ],
-            "usage": {
-                "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                "completion_tokens": len(response_content.split()),
-                "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
-            },
-        }
+        if request.stream:
+            # Streaming response
+            async def content_generator():
+                async for chunk in Blackbox.create_async_generator(
+                    model=request.model,
+                    messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+                    proxy=None,      # Add proxy if needed
+                    websearch=False  # Modify if websearch is needed
+                ):
+                    if isinstance(chunk, ImageResponseModel):
+                        yield f"Image URL: {chunk.images}\n"
+                    else:
+                        yield f"{chunk}\n"
+
+            logger.info(f"Initiating streaming response for API key: {api_key} | IP: {client_ip}")
+            return StreamingResponse(content_generator(), media_type='text/plain')
+        else:
+            # Non-streaming response
+            response_content = await Blackbox.generate_response(
+                model=request.model,
+                messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+                temperature=request.temperature,
+                max_tokens=request.max_tokens
+            )
+
+            logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
+            return {
+                "content": response_content
+            }
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
         raise HTTPException(status_code=503, detail=str(e))
 
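With this diff applied, a client opts into streaming by setting "stream": true in the request body. A minimal client sketch (host, port, and the bearer-token auth scheme are assumptions; the actual API-key handling depends on get_api_key, which is not shown in this diff):

    import asyncio
    from aiohttp import ClientSession

    async def main():
        payload = {
            "model": "blackboxai",  # illustrative model id
            "messages": [{"role": "user", "content": "Hello!"}],
            "stream": True,
        }
        headers = {"Authorization": "Bearer YOUR_API_KEY"}  # auth scheme assumed
        async with ClientSession() as session:
            async with session.post(
                "http://localhost:8000/v1/chat/completions",  # assumed host/port
                json=payload,
                headers=headers,
            ) as resp:
                resp.raise_for_status()
                # The endpoint streams plain text (media_type='text/plain'),
                # one newline-terminated chunk per yielded segment.
                async for line in resp.content:
                    print(line.decode(), end="")

    asyncio.run(main())

Note that the non-streaming branch now returns a bare {"content": ...} object rather than the previous OpenAI-style completion envelope, so existing clients that parse "choices" and "usage" will need to adapt.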