Niansuh committed
Commit eb6514b · verified · 1 Parent(s): cdf9b75

Update main.py

Files changed (1):
  1. main.py +48 -71
main.py CHANGED
@@ -39,13 +39,6 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60  # seconds
 RATE_LIMIT_WINDOW = 60  # seconds
 
-class ImageResponse:
-    def __init__(self, images: Union[str, List[str]], alt: str = "Generated Image"):
-        if isinstance(images, str):
-            images = [images]
-        self.images = images
-        self.alt = alt
-
 class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
@@ -191,34 +184,20 @@ class Blackbox:
 
     @staticmethod
     def clean_response(text: str) -> str:
-        pattern = r'^\$\@\$v=undefined-rv1\$\@'
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
         cleaned_text = re.sub(pattern, '', text)
         return cleaned_text
 
     @classmethod
-    async def create_async_generator(
+    async def generate_response(
         cls,
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
         websearch: bool = False,
         **kwargs
-    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
-        """
-        Creates an asynchronous generator for streaming responses from Blackbox AI.
-
-        Parameters:
-            model (str): Model to use for generating responses.
-            messages (List[Dict[str, str]]): Message history.
-            proxy (Optional[str]): Proxy URL, if needed.
-            websearch (bool): Enables or disables web search mode.
-            **kwargs: Additional keyword arguments.
-
-        Yields:
-            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
-        """
+    ) -> AsyncGenerator[Union[str, Dict[str, Any]], None]:
         model = cls.get_model(model)
-
         chat_id = cls.generate_random_string()
         next_action = cls.generate_next_action()
         next_router_state_tree = cls.generate_next_router_state_tree()
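Note: with the ImageResponse class removed, generate_response now yields plain dicts tagged by a "type" key ("text", "image", or "error"). A minimal consumer sketch (illustrative only, not part of the patch):

    async def consume(model: str, messages: list) -> None:
        # Branch on the "type" tag that replaces isinstance checks on ImageResponse.
        async for chunk in Blackbox.generate_response(model=model, messages=messages):
            if chunk["type"] == "text":
                print(chunk["content"], end="")
            elif chunk["type"] == "image":
                print(f"[image] {chunk['url']}")
            elif chunk["type"] == "error":
                raise RuntimeError(chunk["content"])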
@@ -323,10 +302,9 @@ class Blackbox:
                 match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                 if match:
                     image_url = match.group(1)
-                    image_response = ImageResponse(images=image_url, alt="Generated Image")
-                    yield image_response
+                    yield {"type": "image", "url": image_url}
                 else:
-                    yield cleaned_response
+                    yield {"type": "text", "content": cleaned_response}
             else:
                 if websearch:
                     match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
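Note: the image branch extracts the URL from a Markdown image tag. A quick illustration of the regex (the sample string is made up):

    import re
    sample = "![Generated Image](https://example.com/img.png)"
    m = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', sample)
    assert m and m.group(1) == "https://example.com/img.png"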
@@ -347,12 +325,12 @@ class Blackbox:
                     else:
                         final_response = cleaned_response
                 else:
-                    if '$~~~' in cleaned_response:
-                        final_response = cleaned_response.split('$~~~')[0].strip()
+                    if '$~~~$' in cleaned_response:
+                        final_response = cleaned_response.split('$~~~$')[0].strip()
                     else:
                         final_response = cleaned_response
 
-                yield final_response
+                yield {"type": "text", "content": final_response}
         except ClientResponseError as e:
             error_text = f"Error {e.status}: {e.message}"
             try:
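Note: splitting on the full '$~~~$' delimiter instead of the '$~~~' prefix avoids truncating responses that merely contain a partial marker. For example:

    text = "The total is $~~~ roughly correct"
    text.split('$~~~')[0]    # old behavior: 'The total is '  (truncates on a partial marker)
    text.split('$~~~$')[0]   # new behavior: the full string, since no real delimiter is present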
@@ -361,11 +339,10 @@ class Blackbox:
                 error_text += f" - {cleaned_error}"
             except Exception:
                 pass
-            yield error_text
+            yield {"type": "error", "content": error_text}
         except Exception as e:
-            yield f"Unexpected error during /api/chat request: {str(e)}"
+            yield {"type": "error", "content": f"Unexpected error during /api/chat request: {str(e)}"}
 
-        # Not clear what to do with this second request; keeping it for compatibility
         chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
 
         try:
@@ -385,9 +362,9 @@ class Blackbox:
                 error_text += f" - {cleaned_error}"
             except Exception:
                 pass
-            yield error_text
+            yield {"type": "error", "content": error_text}
         except Exception as e:
-            yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+            yield {"type": "error", "content": f"Unexpected error during /chat/{chat_id} request: {str(e)}"}
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -478,7 +455,6 @@ class Message(BaseModel):
 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
-    stream: Optional[bool] = False  # Added for streaming support
     temperature: Optional[float] = 1.0
     top_p: Optional[float] = 1.0
     n: Optional[int] = 1
@@ -487,17 +463,8 @@ class ChatRequest(BaseModel):
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
-
-# Helper function to collect responses from async generator
-async def collect_response_content(generator: AsyncGenerator[Union[str, ImageResponse], None]) -> str:
-    response_content = ''
-    async for chunk in generator:
-        if isinstance(chunk, str):
-            response_content += chunk
-        elif isinstance(chunk, ImageResponse):
-            # Handle image response if needed
-            response_content += f"[Image: {chunk.alt}] {', '.join(chunk.images)}\n"
-    return response_content
+    stream: Optional[bool] = False
+    websearch: Optional[bool] = False
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
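Note: with stream and websearch now fields on ChatRequest, a request body looks like the sketch below. The Bearer auth header is an assumption (the actual scheme depends on how get_api_key reads credentials), and "blackboxai" stands in for any available model id.

    import requests

    resp = requests.post(
        "http://localhost:8000/v1/chat/completions",
        headers={"Authorization": "Bearer YOUR_API_KEY"},  # assumption: Bearer scheme
        json={
            "model": "blackboxai",  # hypothetical model id
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": False,
            "websearch": True,
        },
    )
    print(resp.json()["choices"][0]["message"]["content"])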
@@ -514,36 +481,40 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
         # Process the request with actual message content, but don't log it
-        generator = Blackbox.create_async_generator(
+        response_generator = Blackbox.generate_response(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
             temperature=request.temperature,
-            max_tokens=request.max_tokens
+            max_tokens=request.max_tokens,
+            websearch=request.websearch
         )
 
         if request.stream:
             async def stream_response():
-                async for chunk in generator:
-                    if isinstance(chunk, str):
-                        data = json.dumps({
-                            "choices": [{
-                                "delta": {"content": chunk},
-                                "index": 0,
-                                "finish_reason": None
-                            }],
-                            "model": request.model,
-                            "id": f"chatcmpl-{uuid.uuid4()}",
-                            "object": "chat.completion.chunk",
-                            "created": int(datetime.now().timestamp()),
-                        })
-                        yield f"data: {data}\n\n"
-                logger.info(f"Streaming response enabled for API key: {api_key} | IP: {client_ip}")
-
+                async for chunk in response_generator:
+                    if chunk["type"] == "text":
+                        yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk['content']}}]})}\n\n"
+                    elif chunk["type"] == "image":
+                        yield f"data: {json.dumps({'choices': [{'delta': {'image': chunk['url']}}]})}\n\n"
+                    elif chunk["type"] == "error":
+                        yield f"data: {json.dumps({'error': chunk['content']})}\n\n"
+                yield "data: [DONE]\n\n"
+
             return StreamingResponse(stream_response(), media_type="text/event-stream")
         else:
-            response_content = await collect_response_content(generator)
+            full_response = ""
+            image_url = None
+            async for chunk in response_generator:
+                if chunk["type"] == "text":
+                    full_response += chunk["content"]
+                elif chunk["type"] == "image":
+                    image_url = chunk["url"]
+                elif chunk["type"] == "error":
+                    raise HTTPException(status_code=500, detail=chunk["content"])
+
             logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
-            return {
+
+            response = {
                 "id": f"chatcmpl-{uuid.uuid4()}",
                 "object": "chat.completion",
                 "created": int(datetime.now().timestamp()),
@@ -553,17 +524,23 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
                         "index": 0,
                         "message": {
                             "role": "assistant",
-                            "content": response_content
+                            "content": full_response
                         },
                         "finish_reason": "stop"
                     }
                 ],
                 "usage": {
                     "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                    "completion_tokens": len(response_content.split()),
-                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
+                    "completion_tokens": len(full_response.split()),
+                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(full_response.split())
                 },
             }
+
+            if image_url:
+                response["choices"][0]["message"]["image"] = image_url
+
+            return response
+
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
         raise HTTPException(status_code=503, detail=str(e))
@@ -607,4 +584,4 @@ async def http_exception_handler(request: Request, exc: HTTPException):
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)
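Note: the usage block counts whitespace-separated words, not model tokens, so the reported figures are approximations. The arithmetic it performs, worked on a made-up exchange:

    messages = [{"role": "user", "content": "Hello there"}]
    completion = "Hi! How can I help?"
    prompt_tokens = sum(len(m["content"].split()) for m in messages)  # 2
    completion_tokens = len(completion.split())                       # 5
    total_tokens = prompt_tokens + completion_tokens                  # 7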
 